diff --git a/go.mod b/go.mod
index 808861376..660af2767 100644
--- a/go.mod
+++ b/go.mod
@@ -4,22 +4,22 @@ go 1.17

 require (
 	github.com/client9/misspell v0.3.4
-	github.com/spf13/cobra v1.1.3
+	github.com/spf13/cobra v1.2.1
 	github.com/spf13/pflag v1.0.5
-	k8s.io/api v0.22.0
-	k8s.io/apimachinery v0.22.0
-	k8s.io/apiserver v0.22.0
-	k8s.io/client-go v0.22.0
-	k8s.io/code-generator v0.22.0
-	k8s.io/component-base v0.22.0
-	k8s.io/component-helpers v0.22.0
-	k8s.io/klog/v2 v2.9.0
+	k8s.io/api v0.23.0
+	k8s.io/apimachinery v0.23.0
+	k8s.io/apiserver v0.23.0
+	k8s.io/client-go v0.23.0
+	k8s.io/code-generator v0.23.0
+	k8s.io/component-base v0.23.0
+	k8s.io/component-helpers v0.23.0
+	k8s.io/klog/v2 v2.30.0
 	k8s.io/kubectl v0.20.5
 	sigs.k8s.io/mdtoc v1.0.1
 )

 require (
-	cloud.google.com/go v0.54.0 // indirect
+	cloud.google.com/go v0.81.0 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest v0.11.18 // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
@@ -37,10 +37,11 @@ require (
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
-	github.com/evanphx/json-patch v4.11.0+incompatible // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 	github.com/felixge/httpsnoop v1.0.1 // indirect
 	github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
-	github.com/go-logr/logr v0.4.0 // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/go-logr/logr v1.2.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
@@ -57,17 +58,17 @@ require (
 	github.com/imdario/mergo v0.3.5 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/json-iterator/go v1.1.11 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_golang v1.11.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.26.0 // indirect
+	github.com/prometheus/common v0.28.0 // indirect
 	github.com/prometheus/procfs v0.6.0 // indirect
 	go.etcd.io/etcd/api/v3 v3.5.0 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
@@ -85,30 +86,31 @@ require (
 	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
-	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
+	go.uber.org/zap v1.19.0 // indirect
+	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
 	golang.org/x/mod v0.4.2 // indirect
-	golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 // indirect
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
+	golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
-	golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
-	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
+	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	golang.org/x/tools v0.1.2 // indirect
+	golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/appengine v1.6.5 // indirect
-	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
-	google.golang.org/grpc v1.38.0 // indirect
-	google.golang.org/protobuf v1.26.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
+	google.golang.org/grpc v1.40.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 // indirect
-	k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect
-	k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+	k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
+	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
 	sigs.k8s.io/yaml v1.2.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 62c0263fe..6adab266d 100644
--- a/go.sum
+++ b/go.sum
@@ -8,20 +8,35 @@ cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
 cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
 cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go/bigquery
v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -67,14 +82,16 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= 
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -91,7 +108,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= @@ -126,12 +145,14 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -144,6 +165,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/getkin/kin-openapi v0.76.0/go.mod 
h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -158,8 +180,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= @@ -192,11 +215,15 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -222,8 +249,11 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -231,11 +261,19 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -247,6 +285,7 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -280,6 +319,7 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -292,8 +332,9 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -305,6 +346,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -316,6 +358,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -336,6 +379,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= @@ -345,8 +389,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -373,11 +418,13 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -397,8 +444,9 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= +github.com/prometheus/common v0.28.0/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -428,18 +476,23 @@ github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -457,9 +510,12 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4 github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -481,6 +537,9 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= @@ -514,8 +573,9 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -523,11 +583,12 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -550,6 +611,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -560,6 +622,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -578,6 +642,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -587,27 +652,50 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= @@ -644,26 +732,42 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -671,8 +775,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 
h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -716,13 +821,31 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= +golang.org/x/tools 
v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -737,13 +860,27 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -761,13 +898,34 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -776,12 +934,21 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -793,8 +960,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -806,6 +974,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 
v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -835,56 +1004,61 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y= -k8s.io/api v0.22.0 h1:elCpMZ9UE8dLdYxr55E06TmSeji9I3KH494qH70/y+c= -k8s.io/api v0.22.0/go.mod h1:0AoXXqst47OI/L0oGKq9DG61dvGRPXs7X4/B7KyjBCU= +k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.22.0 h1:CqH/BdNAzZl+sr3tc0D3VsK3u6ARVSo3GWyLmfIjbP0= -k8s.io/apimachinery v0.22.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apiserver v0.22.0 h1:KZh2asnRBjawLLfPOi6qiD+A2jaNt31HCnZG6AX3Qcs= -k8s.io/apiserver v0.22.0/go.mod h1:04kaIEzIQrTGJ5syLppQWvpkLJXQtJECHmae+ZGc/nc= +k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apiserver v0.23.0 h1:Ds/QveXWi9aJ8ISB0CJa4zBNc5njxAs5u3rmMIexqCY= +k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4= k8s.io/client-go v0.20.5/go.mod h1:Ee5OOMMYvlH8FCZhDsacjMlCBwetbGZETwo1OA+e6Zw= -k8s.io/client-go v0.22.0 h1:sD6o9O6tCwUKCENw8v+HFsuAbq2jCu8cWC61/ydwA50= -k8s.io/client-go v0.22.0/go.mod h1:GUjIuXR5PiEv/RVK5OODUsm6eZk7wtSWZSaSJbpFdGg= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= +k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.20.5/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/code-generator v0.22.0 h1:wIo+6NuAEf+aP6dblF+fPJOkY/VnM6wqNHusiW/eQ3o= -k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/code-generator v0.23.0 h1:lhyd2KJVCEmpjaCpuoooGs+e3xhPwpYvupnNRidO0Ds= +k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng= -k8s.io/component-base v0.22.0 h1:ZTmX8hUqH9T9gc0mM42O+KDgtwTYbVTt2MwmLP0eK8A= -k8s.io/component-base v0.22.0/go.mod h1:SXj6Z+V6P6GsBhHZVbWCw9hFjUdUYnJerlhhPnYCBCg= +k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8= +k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE= -k8s.io/component-helpers v0.22.0 h1:OoTOtxTkg/T16FRS1K/WfABzxliTCq3RTbFHMBSod/o= -k8s.io/component-helpers v0.22.0/go.mod h1:YNIbQI59ayNiU8JHlPIxVkOUYycbKhk5Niy0pcyJOEY= +k8s.io/component-helpers v0.23.0 h1:qNbqN10QTefiWcCOPkHL/0nn81sdKVv6ZgEXcSyot/U= +k8s.io/component-helpers 
v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kubectl v0.20.5 h1:/wndy8hw5TsL8G8KWPDJrtPKS8D34uSdWS0BMRmtzWs= k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw= k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 h1:DEQ12ZRxJjsglk5JIi5bLgpKaHihGervKmg5uryaEHw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod 
h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/mdtoc v1.0.1 h1:6ECKhQnbetwZBR6R2IeT2LH+1w+2Zsip0iXjikgaXIk= sigs.k8s.io/mdtoc v1.0.1/go.mod h1:COYBtOjsaCg7o7SC4eaLwEXPuVRSuiVuLLRrHd7kShw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1a7a4c7e5..545bd9d37 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -140,7 +140,7 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.LookupHost("metadata.google.internal") + addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") if err != nil || len(addrs) == 0 { resc <- false return @@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // being stable anyway. host = metadataIP } + suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix req, err := http.NewRequest("GET", u, nil) if err != nil { diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore new file mode 100644 index 000000000..b7ed7f956 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.gitignore @@ -0,0 +1,6 @@ +# editor and IDE paraphernalia +.idea +.vscode + +# macOS paraphernalia +.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 182985490..dc2b7e51e 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -412,6 +412,17 @@ func (d *partialArray) set(key string, val *lazyNode) error { if err != nil { return err } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + (*d)[idx] = val return nil } @@ -462,6 +473,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) { return nil, err } + if idx < 0 { + if !SupportNegativeIndices { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + if idx >= len(*d) { return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } @@ -547,6 +568,29 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } 
+ + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + con, key := findObject(doc, path) if con == nil { @@ -613,6 +657,25 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + con, key := findObject(doc, path) if con == nil { diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 000000000..4cd0cbaf4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 000000000..a9c30165c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 000000000..5ab5d41c5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 000000000..be4d7ea2c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 
2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. 
+ * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. + +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] 
kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 000000000..828a60b24 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. 
+* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 000000000..e180c8fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 000000000..b2629e522 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,130 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. 
+ +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
+ +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 000000000..ced39cb88 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 000000000..89cab046d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." 
+func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 000000000..d9fd1b88a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. 
+ if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... 
+ for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 000000000..b33f2b4d4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. 
+ err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 000000000..86e76a3d6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. 
There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 000000000..2306c4620 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 000000000..870c4d6d1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 000000000..09436f31d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 000000000..94ff801df --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 000000000..c35696004 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 000000000..5d37e294c --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. 
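
The inotify, kqueue, and ReadDirectoryChangesW backends vendored above all feed the same `Events`/`Errors` channel pair on `fsnotify.Watcher`. As a minimal sketch of how a consumer drives that API (the `package main` scaffolding and the `/tmp` path are placeholders; the watcher calls themselves are the fsnotify v1.4.9 surface shown in this diff):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher selects the platform backend (inotify, kqueue, or the
	// Windows completion-port reader) added in the files above.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a directory non-recursively.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Drain both channels; the backends block if Events is not read.
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```
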
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index e9b5520a1..ad825f5f0 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,112 +1,182 @@ -# A more minimal logging API for Go +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. 
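
To make the Logger/LogSink split above concrete, here is a rough sketch of the implementer side against the logr v1.2.0 interface vendored in this change. `printSink`, its `threshold` field, and the `main` scaffolding are invented for illustration; `logr.New`, `logr.LogSink`, `logr.RuntimeInfo`, `V()`, and `Info()` are the names that appear in this diff. Real sinks (zapr, glogr, stdr) do considerably more.

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

// printSink is a toy LogSink that prints key/value pairs to stdout.
// It exists only to show the shape of the interface.
type printSink struct {
	name      string
	threshold int // highest V-level that is still emitted
}

func (s printSink) Init(info logr.RuntimeInfo) {}

func (s printSink) Enabled(level int) bool { return level <= s.threshold }

func (s printSink) Info(level int, msg string, kv ...interface{}) {
	fmt.Println("INFO", s.name, msg, kv)
}

func (s printSink) Error(err error, msg string, kv ...interface{}) {
	fmt.Println("ERROR", s.name, err, msg, kv)
}

// A real sink would record the extra key/value pairs; this toy drops them.
func (s printSink) WithValues(kv ...interface{}) logr.LogSink { return s }

func (s printSink) WithName(name string) logr.LogSink {
	s.name = s.name + "/" + name
	return s
}

func main() {
	logger := logr.New(printSink{threshold: 1})
	logger.V(1).Info("starting up", "listeners", 3)
}
```
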
-This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. - -# Differences from Dave's ideas +### Differences from Dave's ideas The main differences are: -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. Info logs are things you want to tell the user which are not errors. Error logs are, well, errors. If your code receives an `error` from a subordinate function call and is logging that `error` *and not returning it*, use error logs. -2) Verbosity-levels on info logs. This gives developers a chance to indicate +2. Verbosity-levels on info logs. This gives developers a chance to indicate arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this +semantic meaning such as "warning", "trace", and "debug." Superficially this may feel very similar, but the primary difference is the lack of semantics. Because verbosity is a numerical value, it's safe to assume that an app running with higher verbosity means more (and less important) logs will be generated. -This is a BETA grade API. +## Implementations (non-exhaustive) There are implementations for the following logging libraries: +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -# FAQ +## FAQ -## Conceptual +### Conceptual -## Why structured logging? +#### Why structured logging? -- **Structured logs are more easily queriable**: Since you've got +- **Structured logs are more easily queryable**: Since you've got key-value pairs, it's much easier to query your structured logs for particular values by filtering on the contents of a particular key -- think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc + the name and namespace of the reconciled object, etc. 
-- **Structured logging makes it easier to have cross-referencable logs**: +- **Structured logging makes it easier to have cross-referenceable logs**: Similarly to searchability, if you maintain conventions around your keys, it becomes easy to gather all log lines related to a particular concept. - + - **Structured logs allow better dimensions of filtering**: if you have structure to your logs, you've got more precise control over how much information is logged -- you might choose in a particular configuration to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names + matches a certain value, etc., instead of just having v-levels and names to key off of. - **Structured logs better represent structured data**: sometimes, the data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when + objects.) Structured logs allow you to preserve that structure when outputting. -## Why V-levels? +#### Why V-levels? **V-levels give operators an easy way to control the chattiness of log operations**. V-levels provide a way for a given package to distinguish the relative importance or verbosity of a given log message. Then, if a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. +of the package can simply change the v-levels for that library. -## Why not more named levels, like Warning? +#### Why not named levels, like Info/Warning/Error? Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences from Dave's ideas](#differences-from-daves-ideas). -## Why not allow format strings, too? +#### Why not allow format strings, too? **Format strings negate many of the benefits of structured logs**: - They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc + regular expressions, etc. - They don't store structured data well, since contents are flattened into - a string + a string. -- They're not cross-referencable +- They're not cross-referenceable. -- They don't compress easily, since the message is not constant +- They don't compress easily, since the message is not constant. -(unless you turn positional parameters into key-value pairs with numerical +(Unless you turn positional parameters into key-value pairs with numerical keys, at which point you've gotten key-value logging with meaningless -keys) +keys.) -## Practical +### Practical -## Why key-value pairs, and not a map? +#### Why key-value pairs, and not a map? Key-value pairs are *much* easier to optimize, especially around allocations. Zap (a structured logger that inspired logr's interface) has @@ -117,26 +187,26 @@ While the interface ends up being a little less obvious, you get potentially better performance, plus avoid making users type `map[string]string{}` every time they want to log. -## What if my V-levels differ between libraries? +#### What if my V-levels differ between libraries? That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. +`WithName` method to pass different loggers to different libraries. Generally, you should take care to ensure that you have relatively consistent V-levels within a given logger, however, as this makes deciding on what verbosity of logs to request easier. -## But I *really* want to use a format string! 
+#### But I really want to use a format string! That's not actually a question. Assuming your question is "how do I convert my mental model of logging with format strings to logging with constant messages": -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. 2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair + it, and add that as a key value pair. For instance, consider the following examples (all taken from spots in the Kubernetes codebase): @@ -150,34 +220,59 @@ Kubernetes codebase): response when requesting url", "attempt", retries, "after seconds", seconds, "url", url)` -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to reflect over type %T")` becomes `logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T"))`. In general though, the cases where this is necessary should be few and far between. -## How do I choose my V-levels? +#### How do I choose my V-levels? This is basically the only hard constraint: increase V-levels to denote more verbose or more debug-y logs. Otherwise, you can start out with `0` as "you always want to see this", `1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". +`10` as "I would like to performance-test your log collection stack." Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). +info-type logs.) -## How do I choose my keys +#### How do I choose my keys? -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). While key names are mostly unrestricted (and spaces are acceptable), it's generally a good idea to stick to printable ascii characters, or at least match the general character set of your log lines. +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? 
+ +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + [warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 2bafb13d1..9d92a38f1 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -16,36 +16,39 @@ limitations under the License. package logr -// Discard returns a valid Logger that discards all messages logged to it. -// It can be used whenever the caller is not interested in the logs. +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. func Discard() Logger { - return DiscardLogger{} + return Logger{ + level: 0, + sink: discardLogSink{}, + } } -// DiscardLogger is a Logger that discards all messages. -type DiscardLogger struct{} +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} -func (l DiscardLogger) Enabled() bool { +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { return false } -func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Info(int, string, ...interface{}) { } -func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Error(error, string, ...interface{}) { } -func (l DiscardLogger) V(level int) Logger { +func (l discardLogSink) WithValues(...interface{}) LogSink { return l } -func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { +func (l discardLogSink) WithName(string) LogSink { return l } - -func (l DiscardLogger) WithName(name string) Logger { - return l -} - -// Verify that it actually implements the interface -var _ Logger = DiscardLogger{} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 842428bd3..44cd398c9 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -16,83 +16,101 @@ limitations under the License. // This design derives from Dave Cheney's blog: // http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. // // Usage // -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. +// Logging is done using a Logger instance. 
Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". // -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) // -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) // -// With the traditional log package, we might write: +// Errors are much the same. Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). +// +// Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). +// +// Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: // log.Printf("decided to set field foo to value %q for object %s/%s", // targetValue, object.Namespace, object.Name) // -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of -// // "reconcilers", and the named value target-type=Foo, for extra context. 
-// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) // // // later on... -// log.Info("setting foo on object", "value", targetValue, "object", object) +// obj.logger.Info("setting foo", "value", targetValue) // -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. +// Best Practices // -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) -// -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) -// -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) -// -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. -// -// Parts of a log line -// -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. -// -// The Logger name consists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly recommended that name segments -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). -// -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. // // The log message consists of a constant message attached to the log line. // This should generally be a simple description of what's occurring, and should -// never be a format string. +// never be a format string. Variable information can then be attached using +// named values. // -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. // // Key Naming Conventions // @@ -102,6 +120,7 @@ limitations under the License. 
// * be constant (not dependent on input data) // * contain only printable characters // * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -110,21 +129,22 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// -// * `"caller"`: the calling information (file/line) of a particular log line. -// * `"error"`: the underlying error value in the `Error` method. -// * `"level"`: the log level. -// * `"logger"`: the name of the associated logger. -// * `"msg"`: the log message. -// * `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// * `"ts"`: the timestamp for a log line. +// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // +// Break Glass +// // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: // // Underlier exposes access to the underlying logging implementation. @@ -134,81 +154,220 @@ limitations under the License. // type Underlier interface { // GetUnderlying() // } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. package logr import ( "context" ) -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, OutputStats - -// Logger represents the ability to log messages, both errors and not. 
-type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool - - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) - - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly recommended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger } -// InfoLogger provides compatibility with code that relies on the v0.1.0 -// interface. -// -// Deprecated: InfoLogger is an artifact of early versions of this API. New -// users should never use it and existing users should use Logger instead. This -// will be removed in a future release. -type InfoLogger = Logger +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. 
+type Logger struct {
+	sink  LogSink
+	level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+	return l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+	if l.Enabled() {
+		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+			withHelper.GetCallStackHelper()()
+		}
+		l.sink.Info(l.level, msg, keysAndValues...)
+	}
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentation for more
+// information).
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present.
+func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+		withHelper.GetCallStackHelper()()
+	}
+	l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+	if level < 0 {
+		level = 0
+	}
+	l.level += level
+	return l
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+	l.setSink(l.sink.WithValues(keysAndValues...))
+	return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls with WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+	l.setSink(l.sink.WithName(name))
+	return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
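A brief illustrative aside for reviewers: the helper-function case described in the comment above, sketched against the new value-type Logger API. The logAndCount helper and its parameters are made up for this example, and logr.Discard() merely stands in for a Logger backed by a real LogSink.

package main

import "github.com/go-logr/logr"

// logAndCount is a made-up helper. WithCallDepth(1) asks sinks that record
// call sites to attribute the log line to logAndCount's caller rather than
// to logAndCount itself.
func logAndCount(log logr.Logger, msg string, count int) {
	log.WithCallDepth(1).Info(msg, "count", count)
}

func main() {
	// Discard() stands in for a Logger obtained from a real implementation.
	log := logr.Discard()
	logAndCount(log, "items processed", 42)
}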
+func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. type contextKey struct{} -// FromContext returns a Logger constructed from ctx or nil if no -// logger details are found. -func FromContext(ctx context.Context) Logger { +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v + return v, nil } - return nil + return Logger{}, notFoundError{} } -// FromContextOrDiscard returns a Logger constructed from ctx or a Logger -// that discards all messages if no logger details are found. +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v @@ -217,12 +376,59 @@ func FromContextOrDiscard(ctx context.Context) Logger { return Discard() } -// NewContext returns a new context derived from ctx that embeds the Logger. -func NewContext(ctx context.Context, l Logger) context.Context { - return context.WithValue(ctx, contextKey{}, l) +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) } -// CallDepthLogger represents a Logger that knows how to climb the call stack +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. 
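Another illustrative aside: the context helpers added above (NewContext, the new two-value FromContext, and FromContextOrDiscard) in use. The handler name and keys are made up, and logr.Discard() again stands in for a real Logger.

package main

import (
	"context"

	"github.com/go-logr/logr"
)

func handle(ctx context.Context) {
	// FromContextOrDiscard never fails; it falls back to a no-op Logger.
	log := logr.FromContextOrDiscard(ctx)
	log.Info("handling request")

	// FromContext now reports absence with an error instead of returning nil.
	if _, err := logr.FromContext(context.Background()); err != nil {
		log.Info("no Logger stored in that context", "reason", err.Error())
	}
}

func main() {
	// NewContext stores a Logger for callees to retrieve.
	ctx := logr.NewContext(context.Background(), logr.Discard())
	handle(ctx)
}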
+var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -232,35 +438,59 @@ func NewContext(ctx context.Context, l Logger) context.Context { // // This is an optional interface and implementations are not required to // support it. -type CallDepthLogger interface { - Logger - - // WithCallDepth returns a Logger that will offset the call stack by the - // specified number of frames when logging call site information. If depth - // is 0 the attribution should be to the direct caller of this method. If - // depth is 1 the attribution should skip 1 call frame, and so on. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. // Successive calls to this are additive. - WithCallDepth(depth int) Logger + WithCallDepth(depth int) LogSink } -// WithCallDepth returns a Logger that will offset the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. 
//
-// If the underlying log implementation supports the CallDepthLogger interface,
-// the WithCallDepth method will be called and the result returned. If the
-// implementation does not support CallDepthLogger, the original Logger will be
-// returned.
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
//
-// Callers which care about whether this was supported or not should test for
-// CallDepthLogger support themselves.
-func WithCallDepth(logger Logger, depth int) Logger {
-	if decorator, ok := logger.(CallDepthLogger); ok {
-		return decorator.WithCallDepth(depth)
-	}
-	return logger
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+	// GetCallStackHelper returns a function that must be called
+	// to mark the direct caller as a helper function when logging
+	// call site information.
+	GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+	// MarshalLog can be used to:
+	//   - ensure that structs are not logged as strings when the original
+	//     value has a String method: return a different type without a
+	//     String method
+	//   - select which fields of a complex type should get logged:
+	//     return a simpler struct with fewer fields
+	//   - log unexported fields: return a different struct
+	//     with exported fields
+	//
+	// It may return any value of any type.
+	MarshalLog() interface{}
}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.go
new file mode 100644
index 000000000..607e33c74
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.go
@@ -0,0 +1,8598 @@
+// Copyright 2020 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+package openapi_v3
+
+import (
+	"fmt"
+	"github.com/googleapis/gnostic/compiler"
+	"gopkg.in/yaml.v3"
+	"regexp"
+	"strings"
+)
+
+// Version returns the package name (and OpenAPI version).
+func Version() string {
+	return "openapi_v3"
+}
+
+// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
+func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // SchemaOrReference schema_or_reference = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchemaOrReference(m, compiler.NewContext("schemaOrReference", m, context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_SchemaOrReference{SchemaOrReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := compiler.BoolForScalarNode(in) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes := compiler.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAnyOrExpression creates an object of type AnyOrExpression if possible, returning an error if not. +func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpression, error) { + errors := make([]error, 0) + x := &AnyOrExpression{} + matched := false + // Any any = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewAny(m, compiler.NewContext("any", m, context)) + if matchingError == nil { + x.Oneof = &AnyOrExpression_Any{Any: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Expression expression = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewExpression(m, compiler.NewContext("expression", m, context)) + if matchingError == nil { + x.Oneof = &AnyOrExpression_Expression{Expression: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AnyOrExpression") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallback creates an object of type Callback if possible, returning an error if not. 
+func NewCallback(in *yaml.Node, context *compiler.Context) (*Callback, error) { + errors := make([]error, 0) + x := &Callback{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedPathItem path = 1; + // MAP: PathItem ^ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if true { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + // repeated NamedAny specification_extension = 2; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallbackOrReference creates an object of type CallbackOrReference if possible, returning an error if not. 
+func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*CallbackOrReference, error) { + errors := make([]error, 0) + x := &CallbackOrReference{} + matched := false + // Callback callback = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewCallback(m, compiler.NewContext("callback", m, context)) + if matchingError == nil { + x.Oneof = &CallbackOrReference_Callback{Callback: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &CallbackOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid CallbackOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallbacksOrReferences creates an object of type CallbacksOrReferences if possible, returning an error if not. +func NewCallbacksOrReferences(in *yaml.Node, context *compiler.Context) (*CallbacksOrReferences, error) { + errors := make([]error, 0) + x := &CallbacksOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedCallbackOrReference additional_properties = 1; + // MAP: CallbackOrReference + x.AdditionalProperties = make([]*NamedCallbackOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedCallbackOrReference{} + pair.Name = k + var err error + pair.Value, err = NewCallbackOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewComponents creates an object of type Components if possible, returning an error if not. 
+func NewComponents(in *yaml.Node, context *compiler.Context) (*Components, error) { + errors := make([]error, 0) + x := &Components{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"callbacks", "examples", "headers", "links", "parameters", "requestBodies", "responses", "schemas", "securitySchemes"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // SchemasOrReferences schemas = 1; + v1 := compiler.MapValueForKey(m, "schemas") + if v1 != nil { + var err error + x.Schemas, err = NewSchemasOrReferences(v1, compiler.NewContext("schemas", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponsesOrReferences responses = 2; + v2 := compiler.MapValueForKey(m, "responses") + if v2 != nil { + var err error + x.Responses, err = NewResponsesOrReferences(v2, compiler.NewContext("responses", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // ParametersOrReferences parameters = 3; + v3 := compiler.MapValueForKey(m, "parameters") + if v3 != nil { + var err error + x.Parameters, err = NewParametersOrReferences(v3, compiler.NewContext("parameters", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v4, compiler.NewContext("examples", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // RequestBodiesOrReferences request_bodies = 5; + v5 := compiler.MapValueForKey(m, "requestBodies") + if v5 != nil { + var err error + x.RequestBodies, err = NewRequestBodiesOrReferences(v5, compiler.NewContext("requestBodies", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // HeadersOrReferences headers = 6; + v6 := compiler.MapValueForKey(m, "headers") + if v6 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v6, compiler.NewContext("headers", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // SecuritySchemesOrReferences security_schemes = 7; + v7 := compiler.MapValueForKey(m, "securitySchemes") + if v7 != nil { + var err error + x.SecuritySchemes, err = NewSecuritySchemesOrReferences(v7, compiler.NewContext("securitySchemes", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // LinksOrReferences links = 8; + v8 := compiler.MapValueForKey(m, "links") + if v8 != nil { + var err error + x.Links, err = NewLinksOrReferences(v8, compiler.NewContext("links", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // CallbacksOrReferences callbacks = 9; + v9 := compiler.MapValueForKey(m, "callbacks") + if v9 != nil { + var err error + x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 10; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := 
m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefaultType creates an object of type DefaultType if possible, returning an error if not. 
+func NewDefaultType(in *yaml.Node, context *compiler.Context) (*DefaultType, error) { + errors := make([]error, 0) + x := &DefaultType{} + matched := false + switch in.Tag { + case "!!bool": + var v bool + v, matched = compiler.BoolForScalarNode(in) + x.Oneof = &DefaultType_Boolean{Boolean: v} + case "!!str": + var v string + v, matched = compiler.StringForScalarNode(in) + x.Oneof = &DefaultType_String_{String_: v} + case "!!float": + var v float64 + v, matched = compiler.FloatForScalarNode(in) + x.Oneof = &DefaultType_Number{Number: v} + case "!!int": + var v int64 + v, matched = compiler.IntForScalarNode(in) + x.Oneof = &DefaultType_Number{Number: float64(v)} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDiscriminator creates an object of type Discriminator if possible, returning an error if not. +func NewDiscriminator(in *yaml.Node, context *compiler.Context) (*Discriminator, error) { + errors := make([]error, 0) + x := &Discriminator{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"propertyName"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"mapping", "propertyName"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string property_name = 1; + v1 := compiler.MapValueForKey(m, "propertyName") + if v1 != nil { + x.PropertyName, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for propertyName: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Strings mapping = 2; + v2 := compiler.MapValueForKey(m, "mapping") + if v2 != nil { + var err error + x.Mapping, err = NewStrings(v2, compiler.NewContext("mapping", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "openapi", "paths"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"components", "externalDocs", "info", "openapi", "paths", "security", "servers", "tags"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string openapi = 1; + v1 := compiler.MapValueForKey(m, "openapi") + if v1 != nil { + x.Openapi, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for openapi: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Server servers = 3; + v3 := compiler.MapValueForKey(m, "servers") + if v3 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v3) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // Paths paths = 4; + v4 := compiler.MapValueForKey(m, "paths") + if v4 != nil { + var err error + x.Paths, err = NewPaths(v4, compiler.NewContext("paths", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // Components components = 5; + v5 := compiler.MapValueForKey(m, "components") + if v5 != nil { + var err error + x.Components, err = NewComponents(v5, compiler.NewContext("components", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 6; + v6 := compiler.MapValueForKey(m, "security") + if v6 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v6) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated Tag tags = 7; + v7 := compiler.MapValueForKey(m, "tags") + if v7 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := compiler.SequenceNodeForNode(v7) + if ok { + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err 
= NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 9; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewEncoding creates an object of type Encoding if possible, returning an error if not. +func NewEncoding(in *yaml.Node, context *compiler.Context) (*Encoding, error) { + errors := make([]error, 0) + x := &Encoding{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowReserved", "contentType", "explode", "headers", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string content_type = 1; + v1 := compiler.MapValueForKey(m, "contentType") + if v1 != nil { + x.ContentType, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for contentType: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeadersOrReferences headers = 2; + v2 := compiler.MapValueForKey(m, "headers") + if v2 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // string style = 3; + v3 := compiler.MapValueForKey(m, "style") + if v3 != nil { + x.Style, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 4; + v4 := compiler.MapValueForKey(m, "explode") + if v4 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 5; + v5 := compiler.MapValueForKey(m, "allowReserved") + if v5 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 6; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := 
compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewEncodings creates an object of type Encodings if possible, returning an error if not. +func NewEncodings(in *yaml.Node, context *compiler.Context) (*Encodings, error) { + errors := make([]error, 0) + x := &Encodings{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedEncoding additional_properties = 1; + // MAP: Encoding + x.AdditionalProperties = make([]*NamedEncoding, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedEncoding{} + pair.Name = k + var err error + pair.Value, err = NewEncoding(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExample creates an object of type Example if possible, returning an error if not. +func NewExample(in *yaml.Node, context *compiler.Context) (*Example, error) { + errors := make([]error, 0) + x := &Example{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"description", "externalValue", "summary", "value"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string summary = 1; + v1 := compiler.MapValueForKey(m, "summary") + if v1 != nil { + x.Summary, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 3; + v3 := compiler.MapValueForKey(m, "value") + if v3 != nil { + var err error + x.Value, err = NewAny(v3, compiler.NewContext("value", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string external_value = 4; + v4 := compiler.MapValueForKey(m, "externalValue") + if v4 != nil { + x.ExternalValue, ok = compiler.StringForScalarNode(v4) + if !ok { + message := 
fmt.Sprintf("has unexpected value for externalValue: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExampleOrReference creates an object of type ExampleOrReference if possible, returning an error if not. +func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOrReference, error) { + errors := make([]error, 0) + x := &ExampleOrReference{} + matched := false + // Example example = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewExample(m, compiler.NewContext("example", m, context)) + if matchingError == nil { + x.Oneof = &ExampleOrReference_Example{Example: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ExampleOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ExampleOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamplesOrReferences creates an object of type ExamplesOrReferences if possible, returning an error if not. 
+func NewExamplesOrReferences(in *yaml.Node, context *compiler.Context) (*ExamplesOrReferences, error) { + errors := make([]error, 0) + x := &ExamplesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedExampleOrReference additional_properties = 1; + // MAP: ExampleOrReference + x.AdditionalProperties = make([]*NamedExampleOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedExampleOrReference{} + pair.Name = k + var err error + pair.Value, err = NewExampleOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExpression creates an object of type Expression if possible, returning an error if not. +func NewExpression(in *yaml.Node, context *compiler.Context) (*Expression, error) { + errors := make([]error, 0) + x := &Expression{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. 
+func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. 
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "required", "schema", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 2; + v2 := compiler.MapValueForKey(m, "required") + if v2 != nil { + x.Required, ok = compiler.BoolForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 3; + v3 := compiler.MapValueForKey(m, "deprecated") + if v3 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 4; + v4 := compiler.MapValueForKey(m, "allowEmptyValue") + if v4 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string style = 5; + v5 := compiler.MapValueForKey(m, "style") + if v5 != nil { + x.Style, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 6; + v6 := compiler.MapValueForKey(m, "explode") + if v6 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 7; + v7 := compiler.MapValueForKey(m, "allowReserved") + if v7 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference schema = 8; + v8 := compiler.MapValueForKey(m, "schema") + if v8 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v8, compiler.NewContext("schema", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // 
ExamplesOrReferences examples = 10; + v10 := compiler.MapValueForKey(m, "examples") + if v10 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v10, compiler.NewContext("examples", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 11; + v11 := compiler.MapValueForKey(m, "content") + if v11 != nil { + var err error + x.Content, err = NewMediaTypes(v11, compiler.NewContext("content", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 12; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderOrReference creates an object of type HeaderOrReference if possible, returning an error if not. +func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrReference, error) { + errors := make([]error, 0) + x := &HeaderOrReference{} + matched := false + // Header header = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeader(m, compiler.NewContext("header", m, context)) + if matchingError == nil { + x.Oneof = &HeaderOrReference_Header{Header: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &HeaderOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid HeaderOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeadersOrReferences creates an object of type HeadersOrReferences if possible, returning an error if not. 
+func NewHeadersOrReferences(in *yaml.Node, context *compiler.Context) (*HeadersOrReferences, error) { + errors := make([]error, 0) + x := &HeadersOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeaderOrReference additional_properties = 1; + // MAP: HeaderOrReference + x.AdditionalProperties = make([]*NamedHeaderOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedHeaderOrReference{} + pair.Name = k + var err error + pair.Value, err = NewHeaderOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "summary", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 3; + v3 := compiler.MapValueForKey(m, "termsOfService") + if v3 != nil { + x.TermsOfService, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 4; + v4 := compiler.MapValueForKey(m, "contact") + if v4 != nil { + var err error + x.Contact, err = NewContact(v4, compiler.NewContext("contact", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 5; + v5 := compiler.MapValueForKey(m, "license") + if v5 != nil { + var err error + x.License, err = NewLicense(v5, compiler.NewContext("license", v5, context)) + if err != nil { + errors 
= append(errors, err) + } + } + // string version = 6; + v6 := compiler.MapValueForKey(m, "version") + if v6 != nil { + x.Version, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 7; + v7 := compiler.MapValueForKey(m, "summary") + if v7 != nil { + x.Summary, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 8; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.SchemaOrReference = make([]*SchemaOrReference, 0) + y, err := NewSchemaOrReference(m, compiler.NewContext("", m, context)) + if err != nil { + return nil, err + } + x.SchemaOrReference = append(x.SchemaOrReference, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLink creates an object of type Link if possible, returning an error if not. 
+func NewLink(in *yaml.Node, context *compiler.Context) (*Link, error) { + errors := make([]error, 0) + x := &Link{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"description", "operationId", "operationRef", "parameters", "requestBody", "server"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string operation_ref = 1; + v1 := compiler.MapValueForKey(m, "operationRef") + if v1 != nil { + x.OperationRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for operationRef: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string operation_id = 2; + v2 := compiler.MapValueForKey(m, "operationId") + if v2 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // AnyOrExpression parameters = 3; + v3 := compiler.MapValueForKey(m, "parameters") + if v3 != nil { + var err error + x.Parameters, err = NewAnyOrExpression(v3, compiler.NewContext("parameters", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // AnyOrExpression request_body = 4; + v4 := compiler.MapValueForKey(m, "requestBody") + if v4 != nil { + var err error + x.RequestBody, err = NewAnyOrExpression(v4, compiler.NewContext("requestBody", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Server server = 6; + v6 := compiler.MapValueForKey(m, "server") + if v6 != nil { + var err error + x.Server, err = NewServer(v6, compiler.NewContext("server", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 7; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLinkOrReference creates an object of type LinkOrReference if possible, returning an error if not. 
+func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrReference, error) {
+	errors := make([]error, 0)
+	x := &LinkOrReference{}
+	matched := false
+	// Link link = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewLink(m, compiler.NewContext("link", m, context))
+			if matchingError == nil {
+				x.Oneof = &LinkOrReference_Link{Link: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Reference reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewReference(m, compiler.NewContext("reference", m, context))
+			if matchingError == nil {
+				x.Oneof = &LinkOrReference_Reference{Reference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid LinkOrReference")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewLinksOrReferences creates an object of type LinksOrReferences if possible, returning an error if not.
+func NewLinksOrReferences(in *yaml.Node, context *compiler.Context) (*LinksOrReferences, error) {
+	errors := make([]error, 0)
+	x := &LinksOrReferences{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedLinkOrReference additional_properties = 1;
+		// MAP: LinkOrReference
+		x.AdditionalProperties = make([]*NamedLinkOrReference, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedLinkOrReference{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewLinkOrReference(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewMediaType creates an object of type MediaType if possible, returning an error if not.
+func NewMediaType(in *yaml.Node, context *compiler.Context) (*MediaType, error) { + errors := make([]error, 0) + x := &MediaType{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"encoding", "example", "examples", "schema"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // SchemaOrReference schema = 1; + v1 := compiler.MapValueForKey(m, "schema") + if v1 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v1, compiler.NewContext("schema", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 2; + v2 := compiler.MapValueForKey(m, "example") + if v2 != nil { + var err error + x.Example, err = NewAny(v2, compiler.NewContext("example", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 3; + v3 := compiler.MapValueForKey(m, "examples") + if v3 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v3, compiler.NewContext("examples", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // Encodings encoding = 4; + v4 := compiler.MapValueForKey(m, "encoding") + if v4 != nil { + var err error + x.Encoding, err = NewEncodings(v4, compiler.NewContext("encoding", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewMediaTypes creates an object of type MediaTypes if possible, returning an error if not. 
+func NewMediaTypes(in *yaml.Node, context *compiler.Context) (*MediaTypes, error) {
+	errors := make([]error, 0)
+	x := &MediaTypes{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedMediaType additional_properties = 1;
+		// MAP: MediaType
+		x.AdditionalProperties = make([]*NamedMediaType, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedMediaType{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewMediaType(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
+func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) {
+	errors := make([]error, 0)
+	x := &NamedAny{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedCallbackOrReference creates an object of type NamedCallbackOrReference if possible, returning an error if not.
+func NewNamedCallbackOrReference(in *yaml.Node, context *compiler.Context) (*NamedCallbackOrReference, error) { + errors := make([]error, 0) + x := &NamedCallbackOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // CallbackOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewCallbackOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedEncoding creates an object of type NamedEncoding if possible, returning an error if not. +func NewNamedEncoding(in *yaml.Node, context *compiler.Context) (*NamedEncoding, error) { + errors := make([]error, 0) + x := &NamedEncoding{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Encoding value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewEncoding(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedExampleOrReference creates an object of type NamedExampleOrReference if possible, returning an error if not. 
+func NewNamedExampleOrReference(in *yaml.Node, context *compiler.Context) (*NamedExampleOrReference, error) { + errors := make([]error, 0) + x := &NamedExampleOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExampleOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewExampleOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeaderOrReference creates an object of type NamedHeaderOrReference if possible, returning an error if not. +func NewNamedHeaderOrReference(in *yaml.Node, context *compiler.Context) (*NamedHeaderOrReference, error) { + errors := make([]error, 0) + x := &NamedHeaderOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeaderOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeaderOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedLinkOrReference creates an object of type NamedLinkOrReference if possible, returning an error if not. 
+func NewNamedLinkOrReference(in *yaml.Node, context *compiler.Context) (*NamedLinkOrReference, error) { + errors := make([]error, 0) + x := &NamedLinkOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // LinkOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewLinkOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedMediaType creates an object of type NamedMediaType if possible, returning an error if not. +func NewNamedMediaType(in *yaml.Node, context *compiler.Context) (*NamedMediaType, error) { + errors := make([]error, 0) + x := &NamedMediaType{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // MediaType value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewMediaType(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameterOrReference creates an object of type NamedParameterOrReference if possible, returning an error if not. 
+func NewNamedParameterOrReference(in *yaml.Node, context *compiler.Context) (*NamedParameterOrReference, error) { + errors := make([]error, 0) + x := &NamedParameterOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ParameterOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameterOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedRequestBodyOrReference creates an object of type NamedRequestBodyOrReference if possible, returning an error if not. 
+func NewNamedRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*NamedRequestBodyOrReference, error) { + errors := make([]error, 0) + x := &NamedRequestBodyOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // RequestBodyOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewRequestBodyOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseOrReference creates an object of type NamedResponseOrReference if possible, returning an error if not. +func NewNamedResponseOrReference(in *yaml.Node, context *compiler.Context) (*NamedResponseOrReference, error) { + errors := make([]error, 0) + x := &NamedResponseOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchemaOrReference creates an object of type NamedSchemaOrReference if possible, returning an error if not. 
+func NewNamedSchemaOrReference(in *yaml.Node, context *compiler.Context) (*NamedSchemaOrReference, error) { + errors := make([]error, 0) + x := &NamedSchemaOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchemaOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecuritySchemeOrReference creates an object of type NamedSecuritySchemeOrReference if possible, returning an error if not. +func NewNamedSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*NamedSecuritySchemeOrReference, error) { + errors := make([]error, 0) + x := &NamedSecuritySchemeOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecuritySchemeOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecuritySchemeOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedServerVariable creates an object of type NamedServerVariable if possible, returning an error if not. 
+func NewNamedServerVariable(in *yaml.Node, context *compiler.Context) (*NamedServerVariable, error) {
+	errors := make([]error, 0)
+	x := &NamedServerVariable{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ServerVariable value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewServerVariable(v2, compiler.NewContext("value", v2, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedString creates an object of type NamedString if possible, returning an error if not.
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) {
+	errors := make([]error, 0)
+	x := &NamedString{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			x.Value, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
+func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauthFlow creates an object of type OauthFlow if possible, returning an error if not. +func NewOauthFlow(in *yaml.Node, context *compiler.Context) (*OauthFlow, error) { + errors := make([]error, 0) + x := &OauthFlow{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"authorizationUrl", "refreshUrl", "scopes", "tokenUrl"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string authorization_url = 1; + v1 := compiler.MapValueForKey(m, "authorizationUrl") + if v1 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 2; + v2 := compiler.MapValueForKey(m, "tokenUrl") + if v2 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string refresh_url = 3; + v3 := compiler.MapValueForKey(m, "refreshUrl") + if v3 != nil { + x.RefreshUrl, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for refreshUrl: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Strings scopes = 4; + v4 := compiler.MapValueForKey(m, "scopes") + if v4 != nil { + var err error + x.Scopes, err = NewStrings(v4, compiler.NewContext("scopes", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := 
compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauthFlows creates an object of type OauthFlows if possible, returning an error if not. +func NewOauthFlows(in *yaml.Node, context *compiler.Context) (*OauthFlows, error) { + errors := make([]error, 0) + x := &OauthFlows{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"authorizationCode", "clientCredentials", "implicit", "password"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // OauthFlow implicit = 1; + v1 := compiler.MapValueForKey(m, "implicit") + if v1 != nil { + var err error + x.Implicit, err = NewOauthFlow(v1, compiler.NewContext("implicit", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow password = 2; + v2 := compiler.MapValueForKey(m, "password") + if v2 != nil { + var err error + x.Password, err = NewOauthFlow(v2, compiler.NewContext("password", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow client_credentials = 3; + v3 := compiler.MapValueForKey(m, "clientCredentials") + if v3 != nil { + var err error + x.ClientCredentials, err = NewOauthFlow(v3, compiler.NewContext("clientCredentials", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow authorization_code = 4; + v4 := compiler.MapValueForKey(m, "authorizationCode") + if v4 != nil { + var err error + x.AuthorizationCode, err = NewOauthFlow(v4, compiler.NewContext("authorizationCode", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewObject creates an object of type Object if 
possible, returning an error if not. +func NewObject(in *yaml.Node, context *compiler.Context) (*Object, error) { + errors := make([]error, 0) + x := &Object{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"callbacks", "deprecated", "description", "externalDocs", "operationId", "parameters", "requestBody", "responses", "security", "servers", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Tags = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + 
x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParameterOrReference parameters = 6; + v6 := compiler.MapValueForKey(m, "parameters") + if v6 != nil { + // repeated ParameterOrReference + x.Parameters = make([]*ParameterOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v6) + if ok { + for _, item := range a.Content { + y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // RequestBodyOrReference request_body = 7; + v7 := compiler.MapValueForKey(m, "requestBody") + if v7 != nil { + var err error + x.RequestBody, err = NewRequestBodyOrReference(v7, compiler.NewContext("requestBody", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // Responses responses = 8; + v8 := compiler.MapValueForKey(m, "responses") + if v8 != nil { + var err error + x.Responses, err = NewResponses(v8, compiler.NewContext("responses", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // CallbacksOrReferences callbacks = 9; + v9 := compiler.MapValueForKey(m, "callbacks") + if v9 != nil { + var err error + x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool deprecated = 10; + v10 := compiler.MapValueForKey(m, "deprecated") + if v10 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v10) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 11; + v11 := compiler.MapValueForKey(m, "security") + if v11 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v11) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated Server servers = 12; + v12 := compiler.MapValueForKey(m, "servers") + if v12 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // repeated NamedAny specification_extension = 13; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + 
result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "in", "name", "required", "schema", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 5; + v5 := compiler.MapValueForKey(m, "deprecated") + if v5 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 6; + v6 := compiler.MapValueForKey(m, "allowEmptyValue") + if v6 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string style = 7; + v7 := compiler.MapValueForKey(m, "style") + if v7 != nil { + 
x.Style, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 8; + v8 := compiler.MapValueForKey(m, "explode") + if v8 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 9; + v9 := compiler.MapValueForKey(m, "allowReserved") + if v9 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference schema = 10; + v10 := compiler.MapValueForKey(m, "schema") + if v10 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v10, compiler.NewContext("schema", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 11; + v11 := compiler.MapValueForKey(m, "example") + if v11 != nil { + var err error + x.Example, err = NewAny(v11, compiler.NewContext("example", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 12; + v12 := compiler.MapValueForKey(m, "examples") + if v12 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v12, compiler.NewContext("examples", v12, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 13; + v13 := compiler.MapValueForKey(m, "content") + if v13 != nil { + var err error + x.Content, err = NewMediaTypes(v13, compiler.NewContext("content", v13, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 14; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterOrReference creates an object of type ParameterOrReference if possible, returning an error if not. 
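Illustrative sketch, not part of the vendored file: one way the NewParameter constructor above could be exercised from raw YAML, assuming the snippet lives in this same generated package (so the existing yaml and compiler imports apply); the helper name exampleParameter is hypothetical.

func exampleParameter() (*Parameter, error) {
	src := []byte("name: petId\nin: path\nrequired: true\nschema:\n  type: string\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	// yaml.Unmarshal produces a document node; its first child is the mapping
	// node that NewParameter expects.
	root := doc.Content[0]
	return NewParameter(root, compiler.NewContext("parameter", root, nil))
}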
+func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*ParameterOrReference, error) { + errors := make([]error, 0) + x := &ParameterOrReference{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) + if matchingError == nil { + x.Oneof = &ParameterOrReference_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ParameterOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ParameterOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersOrReferences creates an object of type ParametersOrReferences if possible, returning an error if not. +func NewParametersOrReferences(in *yaml.Node, context *compiler.Context) (*ParametersOrReferences, error) { + errors := make([]error, 0) + x := &ParametersOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameterOrReference additional_properties = 1; + // MAP: ParameterOrReference + x.AdditionalProperties = make([]*NamedParameterOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedParameterOrReference{} + pair.Name = k + var err error + pair.Value, err = NewParameterOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. 
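Illustrative sketch, not part of the vendored file: the *OrReference constructors try each arm of the oneof against the same mapping and keep whichever arm parses cleanly, so a bare $ref map lands in the Reference arm. Assumes the same package and imports as this file; exampleParameterRef and the use of the generated GetReference/GetXRef accessors are for illustration only.

func exampleParameterRef() (string, error) {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("$ref: '#/components/parameters/petId'\n"), &doc); err != nil {
		return "", err
	}
	root := doc.Content[0]
	p, err := NewParameterOrReference(root, compiler.NewContext("parameter", root, nil))
	if err != nil {
		return "", err
	}
	// The Parameter arm fails (missing required "name" and "in"), so the
	// Reference arm is selected and carries the $ref string.
	return p.GetReference().GetXRef(), nil
}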
+func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "description", "get", "head", "options", "parameters", "patch", "post", "put", "servers", "summary", "trace"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 4; + v4 := compiler.MapValueForKey(m, "get") + if v4 != nil { + var err error + x.Get, err = NewOperation(v4, compiler.NewContext("get", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 5; + v5 := compiler.MapValueForKey(m, "put") + if v5 != nil { + var err error + x.Put, err = NewOperation(v5, compiler.NewContext("put", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 6; + v6 := compiler.MapValueForKey(m, "post") + if v6 != nil { + var err error + x.Post, err = NewOperation(v6, compiler.NewContext("post", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 7; + v7 := compiler.MapValueForKey(m, "delete") + if v7 != nil { + var err error + x.Delete, err = NewOperation(v7, compiler.NewContext("delete", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 8; + v8 := compiler.MapValueForKey(m, "options") + if v8 != nil { + var err error + x.Options, err = NewOperation(v8, compiler.NewContext("options", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 9; + v9 := compiler.MapValueForKey(m, "head") + if v9 != nil { + var err error + x.Head, err = NewOperation(v9, compiler.NewContext("head", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 10; + v10 := compiler.MapValueForKey(m, "patch") + if v10 != nil { + var err error + x.Patch, err = NewOperation(v10, compiler.NewContext("patch", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation trace = 11; + v11 := compiler.MapValueForKey(m, "trace") + if v11 != nil { + var err error + x.Trace, err = NewOperation(v11, 
compiler.NewContext("trace", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Server servers = 12; + v12 := compiler.MapValueForKey(m, "servers") + if v12 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // repeated ParameterOrReference parameters = 13; + v13 := compiler.MapValueForKey(m, "parameters") + if v13 != nil { + // repeated ParameterOrReference + x.Parameters = make([]*ParameterOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v13) + if ok { + for _, item := range a.Content { + y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny specification_extension = 14; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. 
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedPathItem path = 1; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + // repeated NamedAny specification_extension = 2; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchemaOrReference additional_properties = 1; + // MAP: SchemaOrReference + x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchemaOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewReference creates an object of type Reference if possible, returning an error if not. 
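Illustrative sketch, not part of the vendored file: NewPaths dispatches on key prefixes, so map keys starting with "/" become NamedPathItem pairs while keys starting with "x-" would be collected as specification extensions. Same package and imports assumed; examplePaths is hypothetical.

func examplePaths() (*Paths, error) {
	src := []byte("/pets:\n  get:\n    responses:\n      \"200\":\n        description: OK\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	root := doc.Content[0]
	// The "/pets" key matches the path pattern and is parsed via NewPathItem.
	return NewPaths(root, compiler.NewContext("paths", root, nil))
}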
+func NewReference(in *yaml.Node, context *compiler.Context) (*Reference, error) { + errors := make([]error, 0) + x := &Reference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBodiesOrReferences creates an object of type RequestBodiesOrReferences if possible, returning an error if not. +func NewRequestBodiesOrReferences(in *yaml.Node, context *compiler.Context) (*RequestBodiesOrReferences, error) { + errors := make([]error, 0) + x := &RequestBodiesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedRequestBodyOrReference additional_properties = 1; + // MAP: RequestBodyOrReference + x.AdditionalProperties = make([]*NamedRequestBodyOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedRequestBodyOrReference{} + pair.Name = k + var err error + pair.Value, err = NewRequestBodyOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBody creates an object of type RequestBody if possible, returning an error if not. 
+func NewRequestBody(in *yaml.Node, context *compiler.Context) (*RequestBody, error) { + errors := make([]error, 0) + x := &RequestBody{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"content"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"content", "description", "required"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // MediaTypes content = 2; + v2 := compiler.MapValueForKey(m, "content") + if v2 != nil { + var err error + x.Content, err = NewMediaTypes(v2, compiler.NewContext("content", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool required = 3; + v3 := compiler.MapValueForKey(m, "required") + if v3 != nil { + x.Required, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBodyOrReference creates an object of type RequestBodyOrReference if possible, returning an error if not. 
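Illustrative sketch, not part of the vendored file: NewRequestBody treats "content" as its only required key, so omitting it would surface a missing-required-property error from the checks above. Same package and imports assumed; exampleRequestBody is hypothetical.

func exampleRequestBody() (*RequestBody, error) {
	src := []byte("description: Pet to add\nrequired: true\ncontent:\n  application/json:\n    schema:\n      type: object\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	root := doc.Content[0]
	// "content" is parsed via NewMediaTypes, which descends into the schema.
	return NewRequestBody(root, compiler.NewContext("requestBody", root, nil))
}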
+func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*RequestBodyOrReference, error) { + errors := make([]error, 0) + x := &RequestBodyOrReference{} + matched := false + // RequestBody request_body = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewRequestBody(m, compiler.NewContext("requestBody", m, context)) + if matchingError == nil { + x.Oneof = &RequestBodyOrReference_RequestBody{RequestBody: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &RequestBodyOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid RequestBodyOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. +func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"content", "description", "headers", "links"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeadersOrReferences headers = 2; + v2 := compiler.MapValueForKey(m, "headers") + if v2 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 3; + v3 := compiler.MapValueForKey(m, "content") + if v3 != nil { + var err error + x.Content, err = NewMediaTypes(v3, compiler.NewContext("content", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // LinksOrReferences links = 4; + v4 := compiler.MapValueForKey(m, "links") + if v4 != nil { + var err error + x.Links, err = NewLinksOrReferences(v4, compiler.NewContext("links", v4, context)) + if err != nil 
{ + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseOrReference creates an object of type ResponseOrReference if possible, returning an error if not. +func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*ResponseOrReference, error) { + errors := make([]error, 0) + x := &ResponseOrReference{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) + if matchingError == nil { + x.Oneof = &ResponseOrReference_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ResponseOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ResponseOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"default"} + allowedPatterns := []*regexp.Regexp{pattern3, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // ResponseOrReference default = 1; + v1 := compiler.MapValueForKey(m, "default") + if v1 != nil { + var err error + x.Default, err = NewResponseOrReference(v1, compiler.NewContext("default", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedResponseOrReference response_or_reference = 2; + // MAP: ResponseOrReference ^([0-9X]{3})$ + x.ResponseOrReference = make([]*NamedResponseOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if pattern3.MatchString(k) { + pair := &NamedResponseOrReference{} + pair.Name = k + var err error + pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseOrReference = append(x.ResponseOrReference, pair) + } + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponsesOrReferences creates an object of type ResponsesOrReferences if possible, returning an error if not. 
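Illustrative sketch, not part of the vendored file: in NewResponses the literal "default" key fills x.Default, while keys matching the status-code pattern (pattern3, e.g. "404" or "4XX") become NamedResponseOrReference entries. Same package and imports assumed; exampleResponses is hypothetical.

func exampleResponses() (*Responses, error) {
	src := []byte("default:\n  description: unexpected error\n\"404\":\n  description: not found\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	root := doc.Content[0]
	// Each entry requires a "description", satisfied in both cases here.
	return NewResponses(root, compiler.NewContext("responses", root, nil))
}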
+func NewResponsesOrReferences(in *yaml.Node, context *compiler.Context) (*ResponsesOrReferences, error) { + errors := make([]error, 0) + x := &ResponsesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponseOrReference additional_properties = 1; + // MAP: ResponseOrReference + x.AdditionalProperties = make([]*NamedResponseOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedResponseOrReference{} + pair.Name = k + var err error + pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. +func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"additionalProperties", "allOf", "anyOf", "default", "deprecated", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "not", "nullable", "oneOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "writeOnly", "xml"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool nullable = 1; + v1 := compiler.MapValueForKey(m, "nullable") + if v1 != nil { + x.Nullable, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for nullable: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Discriminator discriminator = 2; + v2 := compiler.MapValueForKey(m, "discriminator") + if v2 != nil { + var err error + x.Discriminator, err = NewDiscriminator(v2, compiler.NewContext("discriminator", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool read_only = 3; + v3 := compiler.MapValueForKey(m, "readOnly") + if v3 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool write_only = 4; + v4 := compiler.MapValueForKey(m, "writeOnly") + if v4 != nil { + x.WriteOnly, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for writeOnly: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 5; + v5 := compiler.MapValueForKey(m, "xml") + if v5 != nil { + var err error + x.Xml, err = NewXml(v5, compiler.NewContext("xml", v5, context)) + 
if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 6; + v6 := compiler.MapValueForKey(m, "externalDocs") + if v6 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v6, compiler.NewContext("externalDocs", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 7; + v7 := compiler.MapValueForKey(m, "example") + if v7 != nil { + var err error + x.Example, err = NewAny(v7, compiler.NewContext("example", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool deprecated = 8; + v8 := compiler.MapValueForKey(m, "deprecated") + if v8 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 9; + v9 := compiler.MapValueForKey(m, "title") + if v9 != nil { + x.Title, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float multiple_of = 10; + v10 := compiler.MapValueForKey(m, "multipleOf") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } 
+ } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 21; + v21 := compiler.MapValueForKey(m, "maxProperties") + if v21 != nil { + t, ok := compiler.IntForScalarNode(v21) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 22; + v22 := compiler.MapValueForKey(m, "minProperties") + if v22 != nil { + t, ok := compiler.IntForScalarNode(v22) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 23; + v23 := compiler.MapValueForKey(m, "required") + if v23 != nil { + v, ok := compiler.SequenceNodeForNode(v23) + if ok { + x.Required = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v23)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 24; + v24 := compiler.MapValueForKey(m, "enum") + if v24 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v24) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // string type = 25; + v25 := compiler.MapValueForKey(m, "type") + if v25 != nil { + x.Type, ok = compiler.StringForScalarNode(v25) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v25)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SchemaOrReference all_of = 26; + v26 := compiler.MapValueForKey(m, "allOf") + if v26 != nil { + // repeated SchemaOrReference + x.AllOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v26) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("allOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) 
+ } + } + } + // repeated SchemaOrReference one_of = 27; + v27 := compiler.MapValueForKey(m, "oneOf") + if v27 != nil { + // repeated SchemaOrReference + x.OneOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v27) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("oneOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.OneOf = append(x.OneOf, y) + } + } + } + // repeated SchemaOrReference any_of = 28; + v28 := compiler.MapValueForKey(m, "anyOf") + if v28 != nil { + // repeated SchemaOrReference + x.AnyOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v28) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("anyOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.AnyOf = append(x.AnyOf, y) + } + } + } + // Schema not = 29; + v29 := compiler.MapValueForKey(m, "not") + if v29 != nil { + var err error + x.Not, err = NewSchema(v29, compiler.NewContext("not", v29, context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 30; + v30 := compiler.MapValueForKey(m, "items") + if v30 != nil { + var err error + x.Items, err = NewItemsItem(v30, compiler.NewContext("items", v30, context)) + if err != nil { + errors = append(errors, err) + } + } + // Properties properties = 31; + v31 := compiler.MapValueForKey(m, "properties") + if v31 != nil { + var err error + x.Properties, err = NewProperties(v31, compiler.NewContext("properties", v31, context)) + if err != nil { + errors = append(errors, err) + } + } + // AdditionalPropertiesItem additional_properties = 32; + v32 := compiler.MapValueForKey(m, "additionalProperties") + if v32 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v32, compiler.NewContext("additionalProperties", v32, context)) + if err != nil { + errors = append(errors, err) + } + } + // DefaultType default = 33; + v33 := compiler.MapValueForKey(m, "default") + if v33 != nil { + var err error + x.Default, err = NewDefaultType(v33, compiler.NewContext("default", v33, context)) + if err != nil { + errors = append(errors, err) + } + } + // string description = 34; + v34 := compiler.MapValueForKey(m, "description") + if v34 != nil { + x.Description, ok = compiler.StringForScalarNode(v34) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v34)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 35; + v35 := compiler.MapValueForKey(m, "format") + if v35 != nil { + x.Format, ok = compiler.StringForScalarNode(v35) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v35)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 36; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, 
context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaOrReference creates an object of type SchemaOrReference if possible, returning an error if not. +func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrReference, error) { + errors := make([]error, 0) + x := &SchemaOrReference{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) + if matchingError == nil { + x.Oneof = &SchemaOrReference_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &SchemaOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SchemaOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemasOrReferences creates an object of type SchemasOrReferences if possible, returning an error if not. +func NewSchemasOrReferences(in *yaml.Node, context *compiler.Context) (*SchemasOrReferences, error) { + errors := make([]error, 0) + x := &SchemasOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchemaOrReference additional_properties = 1; + // MAP: SchemaOrReference + x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchemaOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
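Illustrative sketch, not part of the vendored file: NewSchemaOrReference tries the Schema and Reference arms against the same mapping; an inline schema is rejected by NewReference (no $ref) and kept as a Schema, while a bare $ref map is rejected by NewSchema (invalid key) and kept as a Reference. Same package and imports assumed; exampleSchemaOrReference is hypothetical.

func exampleSchemaOrReference() (*SchemaOrReference, error) {
	src := []byte("type: string\nformat: email\nmaxLength: 254\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	root := doc.Content[0]
	// Parses into the Schema arm with Type, Format, and MaxLength populated.
	return NewSchemaOrReference(root, compiler.NewContext("schema", root, nil))
}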
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityScheme creates an object of type SecurityScheme if possible, returning an error if not. +func NewSecurityScheme(in *yaml.Node, context *compiler.Context) (*SecurityScheme, error) { + errors := make([]error, 0) + x := &SecurityScheme{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"bearerFormat", "description", "flows", "in", "name", "openIdConnectUrl", "scheme", "type"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 3; + v3 := compiler.MapValueForKey(m, "name") + if v3 != nil { + x.Name, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 4; + v4 := compiler.MapValueForKey(m, "in") + if v4 != nil { + x.In, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string scheme = 5; + v5 := compiler.MapValueForKey(m, "scheme") + if v5 != nil { + x.Scheme, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value 
for scheme: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string bearer_format = 6; + v6 := compiler.MapValueForKey(m, "bearerFormat") + if v6 != nil { + x.BearerFormat, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for bearerFormat: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // OauthFlows flows = 7; + v7 := compiler.MapValueForKey(m, "flows") + if v7 != nil { + var err error + x.Flows, err = NewOauthFlows(v7, compiler.NewContext("flows", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string open_id_connect_url = 8; + v8 := compiler.MapValueForKey(m, "openIdConnectUrl") + if v8 != nil { + x.OpenIdConnectUrl, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for openIdConnectUrl: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 9; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecuritySchemeOrReference creates an object of type SecuritySchemeOrReference if possible, returning an error if not. +func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*SecuritySchemeOrReference, error) { + errors := make([]error, 0) + x := &SecuritySchemeOrReference{} + matched := false + // SecurityScheme security_scheme = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSecurityScheme(m, compiler.NewContext("securityScheme", m, context)) + if matchingError == nil { + x.Oneof = &SecuritySchemeOrReference_SecurityScheme{SecurityScheme: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &SecuritySchemeOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecuritySchemesOrReferences creates an object of type SecuritySchemesOrReferences if possible, returning an error if not. 
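Illustrative sketch, not part of the vendored file: a minimal HTTP bearer scheme for NewSecurityScheme, whose only required key is "type". Same package and imports assumed; exampleSecurityScheme is hypothetical.

func exampleSecurityScheme() (*SecurityScheme, error) {
	src := []byte("type: http\nscheme: bearer\nbearerFormat: JWT\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		return nil, err
	}
	root := doc.Content[0]
	// Unknown keys (other than x- extensions) would be reported as invalid.
	return NewSecurityScheme(root, compiler.NewContext("securityScheme", root, nil))
}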
+func NewSecuritySchemesOrReferences(in *yaml.Node, context *compiler.Context) (*SecuritySchemesOrReferences, error) { + errors := make([]error, 0) + x := &SecuritySchemesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecuritySchemeOrReference additional_properties = 1; + // MAP: SecuritySchemeOrReference + x.AdditionalProperties = make([]*NamedSecuritySchemeOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSecuritySchemeOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSecuritySchemeOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServer creates an object of type Server if possible, returning an error if not. +func NewServer(in *yaml.Node, context *compiler.Context) (*Server, error) { + errors := make([]error, 0) + x := &Server{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url", "variables"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string url = 1; + v1 := compiler.MapValueForKey(m, "url") + if v1 != nil { + x.Url, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ServerVariables variables = 3; + v3 := compiler.MapValueForKey(m, "variables") + if v3 != nil { + var err error + x.Variables, err = NewServerVariables(v3, compiler.NewContext("variables", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + 
result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServerVariable creates an object of type ServerVariable if possible, returning an error if not. +func NewServerVariable(in *yaml.Node, context *compiler.Context) (*ServerVariable, error) { + errors := make([]error, 0) + x := &ServerVariable{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"default"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "enum"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string enum = 1; + v1 := compiler.MapValueForKey(m, "enum") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Enum = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for enum: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string default = 2; + v2 := compiler.MapValueForKey(m, "default") + if v2 != nil { + x.Default, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for default: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServerVariables creates an object of type ServerVariables if possible, returning an error if not. 
+func NewServerVariables(in *yaml.Node, context *compiler.Context) (*ServerVariables, error) { + errors := make([]error, 0) + x := &ServerVariables{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedServerVariable additional_properties = 1; + // MAP: ServerVariable + x.AdditionalProperties = make([]*NamedServerVariable, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedServerVariable{} + pair.Name = k + var err error + pair.Value, err = NewServerVariable(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSpecificationExtension creates an object of type SpecificationExtension if possible, returning an error if not. +func NewSpecificationExtension(in *yaml.Node, context *compiler.Context) (*SpecificationExtension, error) { + errors := make([]error, 0) + x := &SpecificationExtension{} + matched := false + switch in.Tag { + case "!!bool": + var v bool + v, matched = compiler.BoolForScalarNode(in) + x.Oneof = &SpecificationExtension_Boolean{Boolean: v} + case "!!str": + var v string + v, matched = compiler.StringForScalarNode(in) + x.Oneof = &SpecificationExtension_String_{String_: v} + case "!!float": + var v float64 + v, matched = compiler.FloatForScalarNode(in) + x.Oneof = &SpecificationExtension_Number{Number: v} + case "!!int": + var v int64 + v, matched = compiler.IntForScalarNode(in) + x.Oneof = &SpecificationExtension_Number{Number: float64(v)} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + x.Value = make([]string, 0) + for _, node := range in.Content { + s, _ := compiler.StringForScalarNode(node) + x.Value = append(x.Value, s) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStrings creates an object of type Strings if possible, returning an error if not. +func NewStrings(in *yaml.Node, context *compiler.Context) (*Strings, error) { + errors := make([]error, 0) + x := &Strings{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedString{} + pair.Name = k + pair.Value, _ = compiler.StringForScalarNode(v) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. 
+func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 6; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_SchemaOrReference) + if ok { + _, err := p.SchemaOrReference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AnyOrExpression objects. +func (m *AnyOrExpression) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AnyOrExpression_Any) + if ok { + _, err := p.Any.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*AnyOrExpression_Expression) + if ok { + _, err := p.Expression.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Callback objects. +func (m *Callback) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside CallbackOrReference objects. +func (m *CallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*CallbackOrReference_Callback) + if ok { + _, err := p.Callback.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*CallbackOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside CallbacksOrReferences objects. +func (m *CallbacksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Components objects. 
+func (m *Components) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schemas != nil { + _, err := m.Schemas.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.RequestBodies != nil { + _, err := m.RequestBodies.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.SecuritySchemes != nil { + _, err := m.SecuritySchemes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Links != nil { + _, err := m.Links.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Callbacks != nil { + _, err := m.Callbacks.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside DefaultType objects. +func (m *DefaultType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Discriminator objects. +func (m *Discriminator) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Mapping != nil { + _, err := m.Mapping.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Components != nil { + _, err := m.Components.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Encoding objects. +func (m *Encoding) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Encodings objects. +func (m *Encodings) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Example objects. +func (m *Example) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExampleOrReference objects. +func (m *ExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ExampleOrReference_Example) + if ok { + _, err := p.Example.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ExampleOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExamplesOrReferences objects. 
+func (m *ExamplesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Expression objects. +func (m *Expression) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderOrReference objects. +func (m *HeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*HeaderOrReference_Header) + if ok { + _, err := p.Header.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*HeaderOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeadersOrReferences objects. +func (m *HeadersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. 
+func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. +func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SchemaOrReference { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Link objects. +func (m *Link) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.RequestBody != nil { + _, err := m.RequestBody.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Server != nil { + _, err := m.Server.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside LinkOrReference objects. +func (m *LinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*LinkOrReference_Link) + if ok { + _, err := p.Link.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*LinkOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside LinksOrReferences objects. +func (m *LinksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside MediaType objects. 
+func (m *MediaType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Encoding != nil { + _, err := m.Encoding.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside MediaTypes objects. +func (m *MediaTypes) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedCallbackOrReference objects. +func (m *NamedCallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedEncoding objects. +func (m *NamedEncoding) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedExampleOrReference objects. +func (m *NamedExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeaderOrReference objects. +func (m *NamedHeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedLinkOrReference objects. 
+func (m *NamedLinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedMediaType objects. +func (m *NamedMediaType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameterOrReference objects. +func (m *NamedParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedRequestBodyOrReference objects. +func (m *NamedRequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseOrReference objects. +func (m *NamedResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchemaOrReference objects. +func (m *NamedSchemaOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecuritySchemeOrReference objects. +func (m *NamedSecuritySchemeOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedServerVariable objects. +func (m *NamedServerVariable) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. 
+func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside OauthFlow objects. +func (m *OauthFlow) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside OauthFlows objects. +func (m *OauthFlows) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Implicit != nil { + _, err := m.Implicit.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Password != nil { + _, err := m.Password.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ClientCredentials != nil { + _, err := m.ClientCredentials.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.AuthorizationCode != nil { + _, err := m.AuthorizationCode.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Object objects. +func (m *Object) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. 
+func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.RequestBody != nil { + _, err := m.RequestBody.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Callbacks != nil { + _, err := m.Callbacks.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterOrReference objects. +func (m *ParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParameterOrReference_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParameterOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersOrReferences objects. +func (m *ParametersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. 
+func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Trace != nil { + _, err := m.Trace.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. +func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Reference objects. +func (m *Reference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBodiesOrReferences objects. 
+func (m *RequestBodiesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBody objects. +func (m *RequestBody) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBodyOrReference objects. +func (m *RequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*RequestBodyOrReference_RequestBody) + if ok { + _, err := p.RequestBody.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*RequestBodyOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Links != nil { + _, err := m.Links.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseOrReference objects. +func (m *ResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseOrReference_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. 
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.ResponseOrReference { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponsesOrReferences objects. +func (m *ResponsesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Discriminator != nil { + _, err := m.Discriminator.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.OneOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.AnyOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Not != nil { + _, err := m.Not.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaOrReference objects. 
+func (m *SchemaOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaOrReference_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemasOrReferences objects. +func (m *SchemasOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityScheme objects. +func (m *SecurityScheme) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Flows != nil { + _, err := m.Flows.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecuritySchemeOrReference objects. +func (m *SecuritySchemeOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecuritySchemeOrReference_SecurityScheme) + if ok { + _, err := p.SecurityScheme.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecuritySchemeOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecuritySchemesOrReferences objects. +func (m *SecuritySchemesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Server objects. +func (m *Server) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Variables != nil { + _, err := m.Variables.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ServerVariable objects. 
+func (m *ServerVariable) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ServerVariables objects. +func (m *ServerVariables) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SpecificationExtension objects. +func (m *SpecificationExtension) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Strings objects. +func (m *Strings) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. +func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schemaOrReference Type:SchemaOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchemaOrReference() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. 
+func (m *Any) ToRawInfo() *yaml.Node { + var err error + var node yaml.Node + err = yaml.Unmarshal([]byte(m.Yaml), &node) + if err == nil { + if node.Kind == yaml.DocumentNode { + return node.Content[0] + } + return &node + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of AnyOrExpression suitable for JSON or YAML export. +func (m *AnyOrExpression) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AnyOrExpression + // {Name:any Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetAny() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:expression Type:Expression StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetExpression() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Callback suitable for JSON or YAML export. +func (m *Callback) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of CallbackOrReference suitable for JSON or YAML export. +func (m *CallbackOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // CallbackOrReference + // {Name:callback Type:Callback StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetCallback() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of CallbacksOrReferences suitable for JSON or YAML export. +func (m *CallbacksOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Components suitable for JSON or YAML export. 
+func (m *Components) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Schemas != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemas")) + info.Content = append(info.Content, m.Schemas.ToRawInfo()) + } + if m.Responses != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.RequestBodies != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBodies")) + info.Content = append(info.Content, m.RequestBodies.ToRawInfo()) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.SecuritySchemes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("securitySchemes")) + info.Content = append(info.Content, m.SecuritySchemes.ToRawInfo()) + } + if m.Links != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("links")) + info.Content = append(info.Content, m.Links.ToRawInfo()) + } + if m.Callbacks != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("callbacks")) + info.Content = append(info.Content, m.Callbacks.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.Email != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of DefaultType suitable for JSON or YAML export. 
+func (m *DefaultType) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // DefaultType + // {Name:number Type:float StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v0, ok := m.GetOneof().(*DefaultType_Number); ok { + return compiler.NewScalarNodeForFloat(v0.Number) + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*DefaultType_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + // {Name:string Type:string StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v2, ok := m.GetOneof().(*DefaultType_String_); ok { + return compiler.NewScalarNodeForString(v2.String_) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Discriminator suitable for JSON or YAML export. +func (m *Discriminator) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("propertyName")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.PropertyName)) + if m.Mapping != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("mapping")) + info.Content = append(info.Content, m.Mapping.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("openapi")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Openapi)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) + info.Content = append(info.Content, m.Info.ToRawInfo()) + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) + if m.Components != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("components")) + info.Content = append(info.Content, m.Components.ToRawInfo()) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if len(m.Tags) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Tags { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Encoding suitable for JSON or YAML export. +func (m *Encoding) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.ContentType != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contentType")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.ContentType)) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Encodings suitable for JSON or YAML export. +func (m *Encodings) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Example suitable for JSON or YAML export. 
+func (m *Example) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Value != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, m.Value.ToRawInfo()) + } + if m.ExternalValue != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.ExternalValue)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExampleOrReference suitable for JSON or YAML export. +func (m *ExampleOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ExampleOrReference + // {Name:example Type:Example StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetExample() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ExamplesOrReferences suitable for JSON or YAML export. +func (m *ExamplesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Expression suitable for JSON or YAML export. +func (m *Expression) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of HeaderOrReference suitable for JSON or YAML export. 
+func (m *HeaderOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // HeaderOrReference + // {Name:header Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeader() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of HeadersOrReferences suitable for JSON or YAML export. +func (m *HeadersOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.TermsOfService != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) + } + if m.Contact != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) + info.Content = append(info.Content, m.Contact.ToRawInfo()) + } + if m.License != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) + info.Content = append(info.Content, m.License.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.SchemaOrReference) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.SchemaOrReference { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemaOrReference")) + info.Content = append(info.Content, items) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. 
+func (m *License) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Link suitable for JSON or YAML export. +func (m *Link) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.OperationRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationRef")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationRef)) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.RequestBody != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBody")) + info.Content = append(info.Content, m.RequestBody.ToRawInfo()) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Server != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("server")) + info.Content = append(info.Content, m.Server.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of LinkOrReference suitable for JSON or YAML export. +func (m *LinkOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // LinkOrReference + // {Name:link Type:Link StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetLink() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of LinksOrReferences suitable for JSON or YAML export. +func (m *LinksOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of MediaType suitable for JSON or YAML export. 
+func (m *MediaType) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Encoding != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("encoding")) + info.Content = append(info.Content, m.Encoding.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of MediaTypes suitable for JSON or YAML export. +func (m *MediaTypes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, m.Value.ToRawInfo()) + } + return info +} + +// ToRawInfo returns a description of NamedCallbackOrReference suitable for JSON or YAML export. +func (m *NamedCallbackOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:CallbackOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedEncoding suitable for JSON or YAML export. +func (m *NamedEncoding) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Encoding StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedExampleOrReference suitable for JSON or YAML export. 
+func (m *NamedExampleOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ExampleOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeaderOrReference suitable for JSON or YAML export. +func (m *NamedHeaderOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:HeaderOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedLinkOrReference suitable for JSON or YAML export. +func (m *NamedLinkOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:LinkOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedMediaType suitable for JSON or YAML export. +func (m *NamedMediaType) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:MediaType StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameterOrReference suitable for JSON or YAML export. +func (m *NamedParameterOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ParameterOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedRequestBodyOrReference suitable for JSON or YAML export. 
+func (m *NamedRequestBodyOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:RequestBodyOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseOrReference suitable for JSON or YAML export. +func (m *NamedResponseOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ResponseOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchemaOrReference suitable for JSON or YAML export. +func (m *NamedSchemaOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SchemaOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecuritySchemeOrReference suitable for JSON or YAML export. +func (m *NamedSecuritySchemeOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SecuritySchemeOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedServerVariable suitable for JSON or YAML export. +func (m *NamedServerVariable) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ServerVariable StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. 
+func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of OauthFlow suitable for JSON or YAML export. +func (m *OauthFlow) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AuthorizationUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + } + if m.TokenUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + } + if m.RefreshUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("refreshUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.RefreshUrl)) + } + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of OauthFlows suitable for JSON or YAML export. +func (m *OauthFlows) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Implicit != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("implicit")) + info.Content = append(info.Content, m.Implicit.ToRawInfo()) + } + if m.Password != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("password")) + info.Content = append(info.Content, m.Password.ToRawInfo()) + } + if m.ClientCredentials != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("clientCredentials")) + info.Content = append(info.Content, m.ClientCredentials.ToRawInfo()) + } + if m.AuthorizationCode != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationCode")) + info.Content = append(info.Content, m.AuthorizationCode.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Object suitable for JSON or YAML export. +func (m *Object) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
+func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Tags) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.RequestBody != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBody")) + info.Content = append(info.Content, m.RequestBody.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + if m.Callbacks != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("callbacks")) + info.Content = append(info.Content, m.Callbacks.ToRawInfo()) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ParameterOrReference suitable for JSON or YAML export. +func (m *ParameterOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ParameterOrReference + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ParametersOrReferences suitable for JSON or YAML export. 
+func (m *ParametersOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Get != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) + } + if m.Put != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) + } + if m.Post != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) + } + if m.Delete != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) + } + if m.Options != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) + } + if m.Head != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) + } + if m.Patch != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) + } + if m.Trace != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("trace")) + info.Content = append(info.Content, m.Trace.ToRawInfo()) + } + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
+func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Reference suitable for JSON or YAML export. +func (m *Reference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + return info +} + +// ToRawInfo returns a description of RequestBodiesOrReferences suitable for JSON or YAML export. +func (m *RequestBodiesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of RequestBody suitable for JSON or YAML export. +func (m *RequestBody) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of RequestBodyOrReference suitable for JSON or YAML export. 
+func (m *RequestBodyOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // RequestBodyOrReference + // {Name:requestBody Type:RequestBody StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetRequestBody() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.Links != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("links")) + info.Content = append(info.Content, m.Links.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseOrReference suitable for JSON or YAML export. +func (m *ResponseOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ResponseOrReference + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.ResponseOrReference != nil { + for _, item := range m.ResponseOrReference { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponsesOrReferences suitable for JSON or YAML export. 
+func (m *ResponsesOrReferences) ToRawInfo() *yaml.Node {
+  info := compiler.NewMappingNode()
+  if m == nil {
+    return info
+  }
+  if m.AdditionalProperties != nil {
+    for _, item := range m.AdditionalProperties {
+      info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+      info.Content = append(info.Content, item.Value.ToRawInfo())
+    }
+  }
+  return info
+}
+
+// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
+func (m *Schema) ToRawInfo() *yaml.Node {
+  info := compiler.NewMappingNode()
+  if m == nil {
+    return info
+  }
+  if m.Nullable != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("nullable"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Nullable))
+  }
+  if m.Discriminator != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
+    info.Content = append(info.Content, m.Discriminator.ToRawInfo())
+  }
+  if m.ReadOnly != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
+  }
+  if m.WriteOnly != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("writeOnly"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.WriteOnly))
+  }
+  if m.Xml != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("xml"))
+    info.Content = append(info.Content, m.Xml.ToRawInfo())
+  }
+  if m.ExternalDocs != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
+    info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
+  }
+  if m.Example != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
+    info.Content = append(info.Content, m.Example.ToRawInfo())
+  }
+  if m.Deprecated != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
+  }
+  if m.Title != "" {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
+  }
+  if m.MultipleOf != 0.0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
+  }
+  if m.Maximum != 0.0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
+  }
+  if m.ExclusiveMaximum != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
+  }
+  if m.Minimum != 0.0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
+  }
+  if m.ExclusiveMinimum != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
+  }
+  if m.MaxLength != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
+  }
+  if m.MinLength != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
+  }
+  if m.Pattern != "" {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
+  }
+  if m.MaxItems != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
+  }
+  if m.MinItems != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
+  }
+  if m.UniqueItems != false {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
+  }
+  if m.MaxProperties != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties))
+  }
+  if m.MinProperties != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties))
+  }
+  if len(m.Required) != 0 {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
+    info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
+  }
+  if len(m.Enum) != 0 {
+    items := compiler.NewSequenceNode()
+    for _, item := range m.Enum {
+      items.Content = append(items.Content, item.ToRawInfo())
+    }
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
+    info.Content = append(info.Content, items)
+  }
+  if m.Type != "" {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
+  }
+  if len(m.AllOf) != 0 {
+    items := compiler.NewSequenceNode()
+    for _, item := range m.AllOf {
+      items.Content = append(items.Content, item.ToRawInfo())
+    }
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf"))
+    info.Content = append(info.Content, items)
+  }
+  if len(m.OneOf) != 0 {
+    items := compiler.NewSequenceNode()
+    for _, item := range m.OneOf {
+      items.Content = append(items.Content, item.ToRawInfo())
+    }
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("oneOf"))
+    info.Content = append(info.Content, items)
+  }
+  if len(m.AnyOf) != 0 {
+    items := compiler.NewSequenceNode()
+    for _, item := range m.AnyOf {
+      items.Content = append(items.Content, item.ToRawInfo())
+    }
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("anyOf"))
+    info.Content = append(info.Content, items)
+  }
+  if m.Not != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("not"))
+    info.Content = append(info.Content, m.Not.ToRawInfo())
+  }
+  if m.Items != nil {
+    items := compiler.NewSequenceNode()
+    for _, item := range m.Items.SchemaOrReference {
+      items.Content = append(items.Content, item.ToRawInfo())
+    }
+    if len(items.Content) == 1 {
+      items = items.Content[0]
+    }
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
+    info.Content = append(info.Content, items)
+  }
+  if m.Properties != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("properties"))
+    info.Content = append(info.Content, m.Properties.ToRawInfo())
+  }
+  if m.AdditionalProperties != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties"))
+    info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo())
+  }
+  if m.Default != nil {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
+    info.Content = append(info.Content, m.Default.ToRawInfo())
+  }
+  if m.Description != "" {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
+  }
+  if m.Format != "" {
+    info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
+    info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
+  }
+  if m.SpecificationExtension != nil {
+    for _, item := range m.SpecificationExtension {
+      info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+      info.Content = append(info.Content, item.Value.ToRawInfo())
+    }
+  }
+  return info
+}
+
+// ToRawInfo returns a description of SchemaOrReference suitable for JSON or YAML export.
+func (m *SchemaOrReference) ToRawInfo() *yaml.Node {
+  // ONE OF WRAPPER
+  // SchemaOrReference
+  // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+  v0 := m.GetSchema()
+  if v0 != nil {
+    return v0.ToRawInfo()
+  }
+  // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+  v1 := m.GetReference()
+  if v1 != nil {
+    return v1.ToRawInfo()
+  }
+  return compiler.NewNullNode()
+}
+
+// ToRawInfo returns a description of SchemasOrReferences suitable for JSON or YAML export.
+func (m *SchemasOrReferences) ToRawInfo() *yaml.Node {
+  info := compiler.NewMappingNode()
+  if m == nil {
+    return info
+  }
+  if m.AdditionalProperties != nil {
+    for _, item := range m.AdditionalProperties {
+      info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+      info.Content = append(info.Content, item.Value.ToRawInfo())
+    }
+  }
+  return info
+}
+
+// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
+func (m *SecurityRequirement) ToRawInfo() *yaml.Node {
+  info := compiler.NewMappingNode()
+  if m == nil {
+    return info
+  }
+  if m.AdditionalProperties != nil {
+    for _, item := range m.AdditionalProperties {
+      info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+      info.Content = append(info.Content, item.Value.ToRawInfo())
+    }
+  }
+  return info
+}
+
+// ToRawInfo returns a description of SecurityScheme suitable for JSON or YAML export.
+func (m *SecurityScheme) ToRawInfo() *yaml.Node {
+  info := compiler.NewMappingNode()
+  if m == nil {
+    return info
+  }
+  // always include this required field.
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Scheme != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scheme")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Scheme)) + } + if m.BearerFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("bearerFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BearerFormat)) + } + if m.Flows != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("flows")) + info.Content = append(info.Content, m.Flows.ToRawInfo()) + } + if m.OpenIdConnectUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("openIdConnectUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OpenIdConnectUrl)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecuritySchemeOrReference suitable for JSON or YAML export. +func (m *SecuritySchemeOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SecuritySchemeOrReference + // {Name:securityScheme Type:SecurityScheme StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSecurityScheme() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of SecuritySchemesOrReferences suitable for JSON or YAML export. +func (m *SecuritySchemesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Server suitable for JSON or YAML export. +func (m *Server) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Variables != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("variables")) + info.Content = append(info.Content, m.Variables.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ServerVariable suitable for JSON or YAML export. +func (m *ServerVariable) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Enum) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Enum)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Default)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ServerVariables suitable for JSON or YAML export. +func (m *ServerVariables) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SpecificationExtension suitable for JSON or YAML export. +func (m *SpecificationExtension) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SpecificationExtension + // {Name:number Type:float StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v0, ok := m.GetOneof().(*SpecificationExtension_Number); ok { + return compiler.NewScalarNodeForFloat(v0.Number) + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*SpecificationExtension_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + // {Name:string Type:string StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v2, ok := m.GetOneof().(*SpecificationExtension_String_); ok { + return compiler.NewScalarNodeForString(v2.String_) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() *yaml.Node { + return compiler.NewSequenceNodeForStringArray(m.Value) +} + +// ToRawInfo returns a description of Strings suitable for JSON or YAML export. 
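As with the other ToRawInfo methods, required fields such as a server's url (and a server variable's default) are always written, while zero-valued optional fields are dropped. A small sketch, illustration only, assuming the same openapi_v3 and gopkg.in/yaml.v3 imports as above:

    package main

    import (
        "fmt"

        openapi_v3 "github.com/googleapis/gnostic/openapiv3"
        "gopkg.in/yaml.v3"
    )

    func main() {
        // Only the required "url" key appears; the empty description is omitted.
        s := &openapi_v3.Server{Url: "https://api.example.com/v1"}
        out, err := yaml.Marshal(s.ToRawInfo())
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out)) // url: https://api.example.com/v1
    }
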
+func (m *Strings) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Namespace != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) + } + if m.Prefix != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) + } + if m.Attribute != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) + } + if m.Wrapped != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +var ( + pattern0 = regexp.MustCompile("^") + pattern1 = regexp.MustCompile("^x-") + pattern2 = regexp.MustCompile("^/") + pattern3 = regexp.MustCompile("^([0-9X]{3})$") +) diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.pb.go new file mode 100644 index 000000000..720fca23a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.pb.go @@ -0,0 +1,8033 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.15.5 +// source: openapiv3/OpenAPIv3.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AdditionalPropertiesItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AdditionalPropertiesItem_SchemaOrReference + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AdditionalPropertiesItem) Reset() { + *x = AdditionalPropertiesItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalPropertiesItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalPropertiesItem) ProtoMessage() {} + +func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. 
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{0} +} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AdditionalPropertiesItem) GetSchemaOrReference() *SchemaOrReference { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_SchemaOrReference); ok { + return x.SchemaOrReference + } + return nil +} + +func (x *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_SchemaOrReference struct { + SchemaOrReference *SchemaOrReference `protobuf:"bytes,1,opt,name=schema_or_reference,json=schemaOrReference,proto3,oneof"` +} + +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +func (*AdditionalPropertiesItem_SchemaOrReference) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
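AdditionalPropertiesItem models the additionalProperties keyword, which may hold either a schema/reference or a bare boolean; callers typically type-switch on the oneof. A hedged sketch (describeAdditionalProperties is a hypothetical helper, not part of the vendored package):

    package main

    import (
        "fmt"

        openapi_v3 "github.com/googleapis/gnostic/openapiv3"
    )

    // describeAdditionalProperties is an illustrative helper showing how the
    // generated oneof accessors are typically consumed.
    func describeAdditionalProperties(item *openapi_v3.AdditionalPropertiesItem) string {
        switch v := item.GetOneof().(type) {
        case *openapi_v3.AdditionalPropertiesItem_SchemaOrReference:
            return fmt.Sprintf("schema or reference: %v", v.SchemaOrReference)
        case *openapi_v3.AdditionalPropertiesItem_Boolean:
            return fmt.Sprintf("boolean: %t", v.Boolean)
        default:
            return "unset"
        }
    }

    func main() {
        fmt.Println(describeAdditionalProperties(&openapi_v3.AdditionalPropertiesItem{
            Oneof: &openapi_v3.AdditionalPropertiesItem_Boolean{Boolean: true},
        }))
    }
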
+func (*Any) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{1} +} + +func (x *Any) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Any) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +type AnyOrExpression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AnyOrExpression_Any + // *AnyOrExpression_Expression + Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AnyOrExpression) Reset() { + *x = AnyOrExpression{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyOrExpression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyOrExpression) ProtoMessage() {} + +func (x *AnyOrExpression) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyOrExpression.ProtoReflect.Descriptor instead. +func (*AnyOrExpression) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{2} +} + +func (m *AnyOrExpression) GetOneof() isAnyOrExpression_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AnyOrExpression) GetAny() *Any { + if x, ok := x.GetOneof().(*AnyOrExpression_Any); ok { + return x.Any + } + return nil +} + +func (x *AnyOrExpression) GetExpression() *Expression { + if x, ok := x.GetOneof().(*AnyOrExpression_Expression); ok { + return x.Expression + } + return nil +} + +type isAnyOrExpression_Oneof interface { + isAnyOrExpression_Oneof() +} + +type AnyOrExpression_Any struct { + Any *Any `protobuf:"bytes,1,opt,name=any,proto3,oneof"` +} + +type AnyOrExpression_Expression struct { + Expression *Expression `protobuf:"bytes,2,opt,name=expression,proto3,oneof"` +} + +func (*AnyOrExpression_Any) isAnyOrExpression_Oneof() {} + +func (*AnyOrExpression_Expression) isAnyOrExpression_Oneof() {} + +// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. 
+type Callback struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Callback) Reset() { + *x = Callback{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Callback) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Callback) ProtoMessage() {} + +func (x *Callback) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Callback.ProtoReflect.Descriptor instead. +func (*Callback) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{3} +} + +func (x *Callback) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +func (x *Callback) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type CallbackOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *CallbackOrReference_Callback + // *CallbackOrReference_Reference + Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *CallbackOrReference) Reset() { + *x = CallbackOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallbackOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbackOrReference) ProtoMessage() {} + +func (x *CallbackOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbackOrReference.ProtoReflect.Descriptor instead. 
+func (*CallbackOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{4} +} + +func (m *CallbackOrReference) GetOneof() isCallbackOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *CallbackOrReference) GetCallback() *Callback { + if x, ok := x.GetOneof().(*CallbackOrReference_Callback); ok { + return x.Callback + } + return nil +} + +func (x *CallbackOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*CallbackOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isCallbackOrReference_Oneof interface { + isCallbackOrReference_Oneof() +} + +type CallbackOrReference_Callback struct { + Callback *Callback `protobuf:"bytes,1,opt,name=callback,proto3,oneof"` +} + +type CallbackOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*CallbackOrReference_Callback) isCallbackOrReference_Oneof() {} + +func (*CallbackOrReference_Reference) isCallbackOrReference_Oneof() {} + +type CallbacksOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedCallbackOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *CallbacksOrReferences) Reset() { + *x = CallbacksOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallbacksOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbacksOrReferences) ProtoMessage() {} + +func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbacksOrReferences.ProtoReflect.Descriptor instead. +func (*CallbacksOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{5} +} + +func (x *CallbacksOrReferences) GetAdditionalProperties() []*NamedCallbackOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. 
+type Components struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schemas *SchemasOrReferences `protobuf:"bytes,1,opt,name=schemas,proto3" json:"schemas,omitempty"` + Responses *ResponsesOrReferences `protobuf:"bytes,2,opt,name=responses,proto3" json:"responses,omitempty"` + Parameters *ParametersOrReferences `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + RequestBodies *RequestBodiesOrReferences `protobuf:"bytes,5,opt,name=request_bodies,json=requestBodies,proto3" json:"request_bodies,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,6,opt,name=headers,proto3" json:"headers,omitempty"` + SecuritySchemes *SecuritySchemesOrReferences `protobuf:"bytes,7,opt,name=security_schemes,json=securitySchemes,proto3" json:"security_schemes,omitempty"` + Links *LinksOrReferences `protobuf:"bytes,8,opt,name=links,proto3" json:"links,omitempty"` + Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,10,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Components) Reset() { + *x = Components{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Components) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Components) ProtoMessage() {} + +func (x *Components) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Components.ProtoReflect.Descriptor instead. +func (*Components) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{6} +} + +func (x *Components) GetSchemas() *SchemasOrReferences { + if x != nil { + return x.Schemas + } + return nil +} + +func (x *Components) GetResponses() *ResponsesOrReferences { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Components) GetParameters() *ParametersOrReferences { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Components) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Components) GetRequestBodies() *RequestBodiesOrReferences { + if x != nil { + return x.RequestBodies + } + return nil +} + +func (x *Components) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Components) GetSecuritySchemes() *SecuritySchemesOrReferences { + if x != nil { + return x.SecuritySchemes + } + return nil +} + +func (x *Components) GetLinks() *LinksOrReferences { + if x != nil { + return x.Links + } + return nil +} + +func (x *Components) GetCallbacks() *CallbacksOrReferences { + if x != nil { + return x.Callbacks + } + return nil +} + +func (x *Components) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Contact information for the exposed API. 
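Every generated getter nil-checks its receiver, so a sparse Document can be traversed without intermediate guards. A brief sketch (illustrative only):

    package main

    import (
        "fmt"

        openapi_v3 "github.com/googleapis/gnostic/openapiv3"
    )

    func main() {
        // No components section exists, yet the chained getters return nil
        // rather than panicking, and the final slice is simply empty.
        doc := &openapi_v3.Document{Openapi: "3.0.0"}
        schemas := doc.GetComponents().GetSchemas().GetAdditionalProperties()
        fmt.Println(len(schemas)) // 0
    }
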
+type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Contact) Reset() { + *x = Contact{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. +func (*Contact) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{7} +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type DefaultType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *DefaultType_Number + // *DefaultType_Boolean + // *DefaultType_String_ + Oneof isDefaultType_Oneof `protobuf_oneof:"oneof"` +} + +func (x *DefaultType) Reset() { + *x = DefaultType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DefaultType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DefaultType) ProtoMessage() {} + +func (x *DefaultType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DefaultType.ProtoReflect.Descriptor instead. 
+func (*DefaultType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{8} +} + +func (m *DefaultType) GetOneof() isDefaultType_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *DefaultType) GetNumber() float64 { + if x, ok := x.GetOneof().(*DefaultType_Number); ok { + return x.Number + } + return 0 +} + +func (x *DefaultType) GetBoolean() bool { + if x, ok := x.GetOneof().(*DefaultType_Boolean); ok { + return x.Boolean + } + return false +} + +func (x *DefaultType) GetString_() string { + if x, ok := x.GetOneof().(*DefaultType_String_); ok { + return x.String_ + } + return "" +} + +type isDefaultType_Oneof interface { + isDefaultType_Oneof() +} + +type DefaultType_Number struct { + Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` +} + +type DefaultType_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +type DefaultType_String_ struct { + String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` +} + +func (*DefaultType_Number) isDefaultType_Oneof() {} + +func (*DefaultType_Boolean) isDefaultType_Oneof() {} + +func (*DefaultType_String_) isDefaultType_Oneof() {} + +// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. +type Discriminator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PropertyName string `protobuf:"bytes,1,opt,name=property_name,json=propertyName,proto3" json:"property_name,omitempty"` + Mapping *Strings `protobuf:"bytes,2,opt,name=mapping,proto3" json:"mapping,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Discriminator) Reset() { + *x = Discriminator{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Discriminator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Discriminator) ProtoMessage() {} + +func (x *Discriminator) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Discriminator.ProtoReflect.Descriptor instead. 
+func (*Discriminator) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{9} +} + +func (x *Discriminator) GetPropertyName() string { + if x != nil { + return x.PropertyName + } + return "" +} + +func (x *Discriminator) GetMapping() *Strings { + if x != nil { + return x.Mapping + } + return nil +} + +func (x *Discriminator) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Openapi string `protobuf:"bytes,1,opt,name=openapi,proto3" json:"openapi,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + Servers []*Server `protobuf:"bytes,3,rep,name=servers,proto3" json:"servers,omitempty"` + Paths *Paths `protobuf:"bytes,4,opt,name=paths,proto3" json:"paths,omitempty"` + Components *Components `protobuf:"bytes,5,opt,name=components,proto3" json:"components,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,6,rep,name=security,proto3" json:"security,omitempty"` + Tags []*Tag `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. +func (*Document) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{10} +} + +func (x *Document) GetOpenapi() string { + if x != nil { + return x.Openapi + } + return "" +} + +func (x *Document) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Document) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *Document) GetPaths() *Paths { + if x != nil { + return x.Paths + } + return nil +} + +func (x *Document) GetComponents() *Components { + if x != nil { + return x.Components + } + return nil +} + +func (x *Document) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Document) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Document) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Document) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// A single encoding definition applied to a single schema property. 
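Document is the root message; its getters expose the top-level OpenAPI sections directly. A short sketch of walking one (illustration only; printSummary is a hypothetical helper, not part of this package):

    package main

    import (
        "fmt"

        openapi_v3 "github.com/googleapis/gnostic/openapiv3"
    )

    // printSummary walks a Document's top-level fields through the generated getters.
    func printSummary(doc *openapi_v3.Document) {
        fmt.Printf("openapi %s: %s %s\n",
            doc.GetOpenapi(), doc.GetInfo().GetTitle(), doc.GetInfo().GetVersion())
        for _, s := range doc.GetServers() {
            fmt.Println("server:", s.GetUrl())
        }
        for _, t := range doc.GetTags() {
            fmt.Println("tag:", t.GetName())
        }
    }

    func main() {
        printSummary(&openapi_v3.Document{
            Openapi: "3.0.3",
            Info:    &openapi_v3.Info{Title: "Example", Version: "1.0.0"},
            Servers: []*openapi_v3.Server{{Url: "https://api.example.com"}},
        })
    }
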
+type Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` + Style string `protobuf:"bytes,3,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,4,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,5,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Encoding) Reset() { + *x = Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Encoding) ProtoMessage() {} + +func (x *Encoding) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Encoding.ProtoReflect.Descriptor instead. +func (*Encoding) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{11} +} + +func (x *Encoding) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Encoding) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Encoding) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Encoding) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Encoding) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Encoding) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Encodings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedEncoding `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Encodings) Reset() { + *x = Encodings{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Encodings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Encodings) ProtoMessage() {} + +func (x *Encodings) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Encodings.ProtoReflect.Descriptor instead. 
+func (*Encodings) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{12} +} + +func (x *Encodings) GetAdditionalProperties() []*NamedEncoding { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Example struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value *Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + ExternalValue string `protobuf:"bytes,4,opt,name=external_value,json=externalValue,proto3" json:"external_value,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Example) Reset() { + *x = Example{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Example) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Example) ProtoMessage() {} + +func (x *Example) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Example.ProtoReflect.Descriptor instead. +func (*Example) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{13} +} + +func (x *Example) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Example) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Example) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Example) GetExternalValue() string { + if x != nil { + return x.ExternalValue + } + return "" +} + +func (x *Example) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ExampleOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ExampleOrReference_Example + // *ExampleOrReference_Reference + Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ExampleOrReference) Reset() { + *x = ExampleOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExampleOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExampleOrReference) ProtoMessage() {} + +func (x *ExampleOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExampleOrReference.ProtoReflect.Descriptor instead. 
+func (*ExampleOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{14} +} + +func (m *ExampleOrReference) GetOneof() isExampleOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ExampleOrReference) GetExample() *Example { + if x, ok := x.GetOneof().(*ExampleOrReference_Example); ok { + return x.Example + } + return nil +} + +func (x *ExampleOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ExampleOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isExampleOrReference_Oneof interface { + isExampleOrReference_Oneof() +} + +type ExampleOrReference_Example struct { + Example *Example `protobuf:"bytes,1,opt,name=example,proto3,oneof"` +} + +type ExampleOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ExampleOrReference_Example) isExampleOrReference_Oneof() {} + +func (*ExampleOrReference_Reference) isExampleOrReference_Oneof() {} + +type ExamplesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedExampleOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ExamplesOrReferences) Reset() { + *x = ExamplesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExamplesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExamplesOrReferences) ProtoMessage() {} + +func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExamplesOrReferences.ProtoReflect.Descriptor instead. +func (*ExamplesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{15} +} + +func (x *ExamplesOrReferences) GetAdditionalProperties() []*NamedExampleOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Expression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Expression) Reset() { + *x = Expression{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expression) ProtoMessage() {} + +func (x *Expression) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expression.ProtoReflect.Descriptor instead. 
+func (*Expression) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{16} +} + +func (x *Expression) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Allows referencing an external resource for extended documentation. +type ExternalDocs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *ExternalDocs) Reset() { + *x = ExternalDocs{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalDocs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocs) ProtoMessage() {} + +func (x *ExternalDocs) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{17} +} + +func (x *ExternalDocs) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocs) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocs) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). 
+type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` + Deprecated bool `protobuf:"varint,3,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + AllowEmptyValue bool `protobuf:"varint,4,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Style string `protobuf:"bytes,5,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,6,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,7,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + Schema *SchemaOrReference `protobuf:"bytes,8,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,10,opt,name=examples,proto3" json:"examples,omitempty"` + Content *MediaTypes `protobuf:"bytes,11,opt,name=content,proto3" json:"content,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,12,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
+func (*Header) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{18} +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Header) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Header) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *Header) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Header) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Header) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Header) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Header) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Header) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Header) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Header) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type HeaderOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *HeaderOrReference_Header + // *HeaderOrReference_Reference + Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *HeaderOrReference) Reset() { + *x = HeaderOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderOrReference) ProtoMessage() {} + +func (x *HeaderOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderOrReference.ProtoReflect.Descriptor instead. 
+func (*HeaderOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{19} +} + +func (m *HeaderOrReference) GetOneof() isHeaderOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *HeaderOrReference) GetHeader() *Header { + if x, ok := x.GetOneof().(*HeaderOrReference_Header); ok { + return x.Header + } + return nil +} + +func (x *HeaderOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*HeaderOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isHeaderOrReference_Oneof interface { + isHeaderOrReference_Oneof() +} + +type HeaderOrReference_Header struct { + Header *Header `protobuf:"bytes,1,opt,name=header,proto3,oneof"` +} + +type HeaderOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*HeaderOrReference_Header) isHeaderOrReference_Oneof() {} + +func (*HeaderOrReference_Reference) isHeaderOrReference_Oneof() {} + +type HeadersOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedHeaderOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *HeadersOrReferences) Reset() { + *x = HeadersOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadersOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadersOrReferences) ProtoMessage() {} + +func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadersOrReferences.ProtoReflect.Descriptor instead. +func (*HeadersOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{20} +} + +func (x *HeadersOrReferences) GetAdditionalProperties() []*NamedHeaderOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. 
+type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` + Summary string `protobuf:"bytes,8,opt,name=summary,proto3" json:"summary,omitempty"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{21} +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +func (x *Info) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +type ItemsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SchemaOrReference []*SchemaOrReference `protobuf:"bytes,1,rep,name=schema_or_reference,json=schemaOrReference,proto3" json:"schema_or_reference,omitempty"` +} + +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{22} +} + +func (x *ItemsItem) GetSchemaOrReference() []*SchemaOrReference { + if x != nil { + return x.SchemaOrReference + } + return nil +} + +// License information for the exposed API. +type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{23} +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. 
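For orientation, a minimal sketch of how the generated Info and License messages above might be populated by client code. It assumes the package is imported as openapi_v3 from the vendored gnostic openapiv3 package; the exact import path depends on the vendor tree and is not shown in this hunk.

// Assumed import (path may differ in this vendor tree):
//   import openapi_v3 "github.com/google/gnostic/openapiv3"
func exampleInfo() *openapi_v3.Info {
	return &openapi_v3.Info{
		Title:   "Example API",
		Version: "1.0.0",
		// Summary is the field added for OpenAPI 3.1 documents; 3.0 documents leave it empty.
		Summary: "Short one-line summary of the API",
		License: &openapi_v3.License{
			Name: "Apache 2.0",
			Url:  "https://www.apache.org/licenses/LICENSE-2.0.html",
		},
	}
}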
+type Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OperationRef string `protobuf:"bytes,1,opt,name=operation_ref,json=operationRef,proto3" json:"operation_ref,omitempty"` + OperationId string `protobuf:"bytes,2,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + Parameters *AnyOrExpression `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` + RequestBody *AnyOrExpression `protobuf:"bytes,4,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + Server *Server `protobuf:"bytes,6,opt,name=server,proto3" json:"server,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Link) Reset() { + *x = Link{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Link) ProtoMessage() {} + +func (x *Link) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Link.ProtoReflect.Descriptor instead. +func (*Link) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{24} +} + +func (x *Link) GetOperationRef() string { + if x != nil { + return x.OperationRef + } + return "" +} + +func (x *Link) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Link) GetParameters() *AnyOrExpression { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Link) GetRequestBody() *AnyOrExpression { + if x != nil { + return x.RequestBody + } + return nil +} + +func (x *Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Link) GetServer() *Server { + if x != nil { + return x.Server + } + return nil +} + +func (x *Link) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type LinkOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *LinkOrReference_Link + // *LinkOrReference_Reference + Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *LinkOrReference) Reset() { + *x = LinkOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinkOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinkOrReference) ProtoMessage() {} + +func (x *LinkOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
LinkOrReference.ProtoReflect.Descriptor instead. +func (*LinkOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{25} +} + +func (m *LinkOrReference) GetOneof() isLinkOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *LinkOrReference) GetLink() *Link { + if x, ok := x.GetOneof().(*LinkOrReference_Link); ok { + return x.Link + } + return nil +} + +func (x *LinkOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*LinkOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isLinkOrReference_Oneof interface { + isLinkOrReference_Oneof() +} + +type LinkOrReference_Link struct { + Link *Link `protobuf:"bytes,1,opt,name=link,proto3,oneof"` +} + +type LinkOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*LinkOrReference_Link) isLinkOrReference_Oneof() {} + +func (*LinkOrReference_Reference) isLinkOrReference_Oneof() {} + +type LinksOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedLinkOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *LinksOrReferences) Reset() { + *x = LinksOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinksOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinksOrReferences) ProtoMessage() {} + +func (x *LinksOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinksOrReferences.ProtoReflect.Descriptor instead. +func (*LinksOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{26} +} + +func (x *LinksOrReferences) GetAdditionalProperties() []*NamedLinkOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Each Media Type Object provides schema and examples for the media type identified by its key. 
+type MediaType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schema *SchemaOrReference `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,2,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,3,opt,name=examples,proto3" json:"examples,omitempty"` + Encoding *Encodings `protobuf:"bytes,4,opt,name=encoding,proto3" json:"encoding,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *MediaType) Reset() { + *x = MediaType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MediaType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MediaType) ProtoMessage() {} + +func (x *MediaType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MediaType.ProtoReflect.Descriptor instead. +func (*MediaType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{27} +} + +func (x *MediaType) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *MediaType) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *MediaType) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *MediaType) GetEncoding() *Encodings { + if x != nil { + return x.Encoding + } + return nil +} + +func (x *MediaType) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type MediaTypes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedMediaType `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *MediaTypes) Reset() { + *x = MediaTypes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MediaTypes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MediaTypes) ProtoMessage() {} + +func (x *MediaTypes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MediaTypes.ProtoReflect.Descriptor instead. +func (*MediaTypes) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{28} +} + +func (x *MediaTypes) GetAdditionalProperties() []*NamedMediaType { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
+type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. +func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{29} +} + +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. +type NamedCallbackOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *CallbackOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedCallbackOrReference) Reset() { + *x = NamedCallbackOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedCallbackOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedCallbackOrReference) ProtoMessage() {} + +func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedCallbackOrReference.ProtoReflect.Descriptor instead. +func (*NamedCallbackOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{30} +} + +func (x *NamedCallbackOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedCallbackOrReference) GetValue() *CallbackOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. 
+type NamedEncoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Encoding `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedEncoding) Reset() { + *x = NamedEncoding{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedEncoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedEncoding) ProtoMessage() {} + +func (x *NamedEncoding) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedEncoding.ProtoReflect.Descriptor instead. +func (*NamedEncoding) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{31} +} + +func (x *NamedEncoding) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedEncoding) GetValue() *Encoding { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. +type NamedExampleOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ExampleOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedExampleOrReference) Reset() { + *x = NamedExampleOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedExampleOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedExampleOrReference) ProtoMessage() {} + +func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedExampleOrReference.ProtoReflect.Descriptor instead. +func (*NamedExampleOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{32} +} + +func (x *NamedExampleOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedExampleOrReference) GetValue() *ExampleOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. 
+type NamedHeaderOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *HeaderOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedHeaderOrReference) Reset() { + *x = NamedHeaderOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedHeaderOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedHeaderOrReference) ProtoMessage() {} + +func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedHeaderOrReference.ProtoReflect.Descriptor instead. +func (*NamedHeaderOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{33} +} + +func (x *NamedHeaderOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedHeaderOrReference) GetValue() *HeaderOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. +type NamedLinkOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *LinkOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedLinkOrReference) Reset() { + *x = NamedLinkOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedLinkOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedLinkOrReference) ProtoMessage() {} + +func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedLinkOrReference.ProtoReflect.Descriptor instead. +func (*NamedLinkOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{34} +} + +func (x *NamedLinkOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedLinkOrReference) GetValue() *LinkOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. 
+type NamedMediaType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *MediaType `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedMediaType) Reset() { + *x = NamedMediaType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedMediaType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedMediaType) ProtoMessage() {} + +func (x *NamedMediaType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedMediaType.ProtoReflect.Descriptor instead. +func (*NamedMediaType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{35} +} + +func (x *NamedMediaType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedMediaType) GetValue() *MediaType { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. +type NamedParameterOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ParameterOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedParameterOrReference) Reset() { + *x = NamedParameterOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedParameterOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedParameterOrReference) ProtoMessage() {} + +func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedParameterOrReference.ProtoReflect.Descriptor instead. +func (*NamedParameterOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{36} +} + +func (x *NamedParameterOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedParameterOrReference) GetValue() *ParameterOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
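The NamedXxx messages above exist because gnostic models JSON maps as ordered lists of (name, value) pairs rather than Go maps, so key order is preserved. A small sketch, under the same import assumption as earlier, of building a response content map as MediaTypes:

func exampleContent() *openapi_v3.MediaTypes {
	// Each map entry becomes a NamedMediaType carrying the key and its value.
	return &openapi_v3.MediaTypes{
		AdditionalProperties: []*openapi_v3.NamedMediaType{
			{
				Name:  "application/json",
				Value: &openapi_v3.MediaType{},
			},
		},
	}
}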
+type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{37} +} + +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. +type NamedRequestBodyOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *RequestBodyOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedRequestBodyOrReference) Reset() { + *x = NamedRequestBodyOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedRequestBodyOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedRequestBodyOrReference) ProtoMessage() {} + +func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedRequestBodyOrReference.ProtoReflect.Descriptor instead. +func (*NamedRequestBodyOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{38} +} + +func (x *NamedRequestBodyOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedRequestBodyOrReference) GetValue() *RequestBodyOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. 
+type NamedResponseOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ResponseOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponseOrReference) Reset() { + *x = NamedResponseOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponseOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponseOrReference) ProtoMessage() {} + +func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponseOrReference.ProtoReflect.Descriptor instead. +func (*NamedResponseOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{39} +} + +func (x *NamedResponseOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponseOrReference) GetValue() *ResponseOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. +type NamedSchemaOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SchemaOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSchemaOrReference) Reset() { + *x = NamedSchemaOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSchemaOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSchemaOrReference) ProtoMessage() {} + +func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSchemaOrReference.ProtoReflect.Descriptor instead. +func (*NamedSchemaOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{40} +} + +func (x *NamedSchemaOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSchemaOrReference) GetValue() *SchemaOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. 
+type NamedSecuritySchemeOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SecuritySchemeOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSecuritySchemeOrReference) Reset() { + *x = NamedSecuritySchemeOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSecuritySchemeOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSecuritySchemeOrReference) ProtoMessage() {} + +func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSecuritySchemeOrReference.ProtoReflect.Descriptor instead. +func (*NamedSecuritySchemeOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{41} +} + +func (x *NamedSecuritySchemeOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSecuritySchemeOrReference) GetValue() *SecuritySchemeOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. +type NamedServerVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ServerVariable `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedServerVariable) Reset() { + *x = NamedServerVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedServerVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedServerVariable) ProtoMessage() {} + +func (x *NamedServerVariable) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedServerVariable.ProtoReflect.Descriptor instead. +func (*NamedServerVariable) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{42} +} + +func (x *NamedServerVariable) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedServerVariable) GetValue() *ServerVariable { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. 
+type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{43} +} + +func (x *NamedString) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. 
+func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{44} +} + +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value + } + return nil +} + +// Configuration details for a supported OAuth Flow +type OauthFlow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthorizationUrl string `protobuf:"bytes,1,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,2,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + RefreshUrl string `protobuf:"bytes,3,opt,name=refresh_url,json=refreshUrl,proto3" json:"refresh_url,omitempty"` + Scopes *Strings `protobuf:"bytes,4,opt,name=scopes,proto3" json:"scopes,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *OauthFlow) Reset() { + *x = OauthFlow{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OauthFlow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OauthFlow) ProtoMessage() {} + +func (x *OauthFlow) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OauthFlow.ProtoReflect.Descriptor instead. +func (*OauthFlow) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{45} +} + +func (x *OauthFlow) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *OauthFlow) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *OauthFlow) GetRefreshUrl() string { + if x != nil { + return x.RefreshUrl + } + return "" +} + +func (x *OauthFlow) GetScopes() *Strings { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *OauthFlow) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Allows configuration of the supported OAuth Flows. 
+type OauthFlows struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Implicit *OauthFlow `protobuf:"bytes,1,opt,name=implicit,proto3" json:"implicit,omitempty"` + Password *OauthFlow `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientCredentials *OauthFlow `protobuf:"bytes,3,opt,name=client_credentials,json=clientCredentials,proto3" json:"client_credentials,omitempty"` + AuthorizationCode *OauthFlow `protobuf:"bytes,4,opt,name=authorization_code,json=authorizationCode,proto3" json:"authorization_code,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *OauthFlows) Reset() { + *x = OauthFlows{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OauthFlows) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OauthFlows) ProtoMessage() {} + +func (x *OauthFlows) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OauthFlows.ProtoReflect.Descriptor instead. +func (*OauthFlows) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{46} +} + +func (x *OauthFlows) GetImplicit() *OauthFlow { + if x != nil { + return x.Implicit + } + return nil +} + +func (x *OauthFlows) GetPassword() *OauthFlow { + if x != nil { + return x.Password + } + return nil +} + +func (x *OauthFlows) GetClientCredentials() *OauthFlow { + if x != nil { + return x.ClientCredentials + } + return nil +} + +func (x *OauthFlows) GetAuthorizationCode() *OauthFlow { + if x != nil { + return x.AuthorizationCode + } + return nil +} + +func (x *OauthFlows) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. +func (*Object) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{47} +} + +func (x *Object) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes a single API operation on a path. 
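A sketch of filling in OauthFlows for a client-credentials flow with the messages defined above, under the same import assumption; it also assumes the Strings message (generated elsewhere in this file) follows the same AdditionalProperties pattern as the other map messages here.

func exampleFlows() *openapi_v3.OauthFlows {
	return &openapi_v3.OauthFlows{
		ClientCredentials: &openapi_v3.OauthFlow{
			TokenUrl: "https://auth.example.com/token",
			// Scopes maps each scope name to its description, again as ordered pairs.
			Scopes: &openapi_v3.Strings{
				AdditionalProperties: []*openapi_v3.NamedString{
					{Name: "read:items", Value: "read access to items"},
				},
			},
		},
	}
}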
+type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + Parameters []*ParameterOrReference `protobuf:"bytes,6,rep,name=parameters,proto3" json:"parameters,omitempty"` + RequestBody *RequestBodyOrReference `protobuf:"bytes,7,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` + Responses *Responses `protobuf:"bytes,8,opt,name=responses,proto3" json:"responses,omitempty"` + Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` + Deprecated bool `protobuf:"varint,10,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,11,rep,name=security,proto3" json:"security,omitempty"` + Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,13,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
+func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{48} +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetParameters() []*ParameterOrReference { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) GetRequestBody() *RequestBodyOrReference { + if x != nil { + return x.RequestBody + } + return nil +} + +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetCallbacks() *CallbacksOrReferences { + if x != nil { + return x.Callbacks + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *Operation) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. +type Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Deprecated bool `protobuf:"varint,5,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + AllowEmptyValue bool `protobuf:"varint,6,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Style string `protobuf:"bytes,7,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,8,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,9,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + Schema *SchemaOrReference `protobuf:"bytes,10,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,11,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,12,opt,name=examples,proto3" json:"examples,omitempty"` + Content *MediaTypes `protobuf:"bytes,13,opt,name=content,proto3" json:"content,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + 
+func (*Parameter) ProtoMessage() {} + +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{49} +} + +func (x *Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Parameter) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Parameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Parameter) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Parameter) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *Parameter) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Parameter) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Parameter) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Parameter) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Parameter) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Parameter) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Parameter) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Parameter) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ParameterOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ParameterOrReference_Parameter + // *ParameterOrReference_Reference + Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ParameterOrReference) Reset() { + *x = ParameterOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParameterOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParameterOrReference) ProtoMessage() {} + +func (x *ParameterOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParameterOrReference.ProtoReflect.Descriptor instead. 
+func (*ParameterOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{50} +} + +func (m *ParameterOrReference) GetOneof() isParameterOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ParameterOrReference) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParameterOrReference_Parameter); ok { + return x.Parameter + } + return nil +} + +func (x *ParameterOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ParameterOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isParameterOrReference_Oneof interface { + isParameterOrReference_Oneof() +} + +type ParameterOrReference_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParameterOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ParameterOrReference_Parameter) isParameterOrReference_Oneof() {} + +func (*ParameterOrReference_Reference) isParameterOrReference_Oneof() {} + +type ParametersOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedParameterOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ParametersOrReferences) Reset() { + *x = ParametersOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParametersOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParametersOrReferences) ProtoMessage() {} + +func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParametersOrReferences.ProtoReflect.Descriptor instead. +func (*ParametersOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{51} +} + +func (x *ParametersOrReferences) GetAdditionalProperties() []*NamedParameterOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. 
+type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Get *Operation `protobuf:"bytes,4,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,5,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,6,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,7,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,8,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,9,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,10,opt,name=patch,proto3" json:"patch,omitempty"` + Trace *Operation `protobuf:"bytes,11,opt,name=trace,proto3" json:"trace,omitempty"` + Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` + Parameters []*ParameterOrReference `protobuf:"bytes,13,rep,name=parameters,proto3" json:"parameters,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. 
+func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{52} +} + +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *PathItem) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *PathItem) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get + } + return nil +} + +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put + } + return nil +} + +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post + } + return nil +} + +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete + } + return nil +} + +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options + } + return nil +} + +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head + } + return nil +} + +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch + } + return nil +} + +func (x *PathItem) GetTrace() *Operation { + if x != nil { + return x.Trace + } + return nil +} + +func (x *PathItem) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *PathItem) GetParameters() []*ParameterOrReference { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *PathItem) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. +type Paths struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
+func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{53} +} + +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +func (x *Paths) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{54} +} + +func (x *Properties) GetAdditionalProperties() []*NamedSchemaOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. 
+func (*Reference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{55} +} + +func (x *Reference) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +type RequestBodiesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedRequestBodyOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *RequestBodiesOrReferences) Reset() { + *x = RequestBodiesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBodiesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBodiesOrReferences) ProtoMessage() {} + +func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBodiesOrReferences.ProtoReflect.Descriptor instead. +func (*RequestBodiesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{56} +} + +func (x *RequestBodiesOrReferences) GetAdditionalProperties() []*NamedRequestBodyOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes a single request body. +type RequestBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Content *MediaTypes `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *RequestBody) Reset() { + *x = RequestBody{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBody) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBody) ProtoMessage() {} + +func (x *RequestBody) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBody.ProtoReflect.Descriptor instead. 
+func (*RequestBody) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{57} +} + +func (x *RequestBody) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *RequestBody) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *RequestBody) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *RequestBody) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type RequestBodyOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *RequestBodyOrReference_RequestBody + // *RequestBodyOrReference_Reference + Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *RequestBodyOrReference) Reset() { + *x = RequestBodyOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBodyOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBodyOrReference) ProtoMessage() {} + +func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBodyOrReference.ProtoReflect.Descriptor instead. +func (*RequestBodyOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{58} +} + +func (m *RequestBodyOrReference) GetOneof() isRequestBodyOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *RequestBodyOrReference) GetRequestBody() *RequestBody { + if x, ok := x.GetOneof().(*RequestBodyOrReference_RequestBody); ok { + return x.RequestBody + } + return nil +} + +func (x *RequestBodyOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*RequestBodyOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isRequestBodyOrReference_Oneof interface { + isRequestBodyOrReference_Oneof() +} + +type RequestBodyOrReference_RequestBody struct { + RequestBody *RequestBody `protobuf:"bytes,1,opt,name=request_body,json=requestBody,proto3,oneof"` +} + +type RequestBodyOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*RequestBodyOrReference_RequestBody) isRequestBodyOrReference_Oneof() {} + +func (*RequestBodyOrReference_Reference) isRequestBodyOrReference_Oneof() {} + +// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. 
+type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` + Content *MediaTypes `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` + Links *LinksOrReferences `protobuf:"bytes,4,opt,name=links,proto3" json:"links,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{59} +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Response) GetLinks() *LinksOrReferences { + if x != nil { + return x.Links + } + return nil +} + +func (x *Response) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ResponseOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ResponseOrReference_Response + // *ResponseOrReference_Reference + Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ResponseOrReference) Reset() { + *x = ResponseOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseOrReference) ProtoMessage() {} + +func (x *ResponseOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseOrReference.ProtoReflect.Descriptor instead. 
+func (*ResponseOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{60} +} + +func (m *ResponseOrReference) GetOneof() isResponseOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ResponseOrReference) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseOrReference_Response); ok { + return x.Response + } + return nil +} + +func (x *ResponseOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ResponseOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isResponseOrReference_Oneof interface { + isResponseOrReference_Oneof() +} + +type ResponseOrReference_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +} + +type ResponseOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ResponseOrReference_Response) isResponseOrReference_Oneof() {} + +func (*ResponseOrReference_Reference) isResponseOrReference_Oneof() {} + +// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. +type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Default *ResponseOrReference `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty"` + ResponseOrReference []*NamedResponseOrReference `protobuf:"bytes,2,rep,name=response_or_reference,json=responseOrReference,proto3" json:"response_or_reference,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. 
+func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{61} +} + +func (x *Responses) GetDefault() *ResponseOrReference { + if x != nil { + return x.Default + } + return nil +} + +func (x *Responses) GetResponseOrReference() []*NamedResponseOrReference { + if x != nil { + return x.ResponseOrReference + } + return nil +} + +func (x *Responses) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ResponsesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedResponseOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ResponsesOrReferences) Reset() { + *x = ResponsesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponsesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponsesOrReferences) ProtoMessage() {} + +func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponsesOrReferences.ProtoReflect.Descriptor instead. +func (*ResponsesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{62} +} + +func (x *ResponsesOrReferences) GetAdditionalProperties() []*NamedResponseOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. 
+type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nullable bool `protobuf:"varint,1,opt,name=nullable,proto3" json:"nullable,omitempty"` + Discriminator *Discriminator `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + WriteOnly bool `protobuf:"varint,4,opt,name=write_only,json=writeOnly,proto3" json:"write_only,omitempty"` + Xml *Xml `protobuf:"bytes,5,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + Deprecated bool `protobuf:"varint,8,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Title string `protobuf:"bytes,9,opt,name=title,proto3" json:"title,omitempty"` + MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,21,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,22,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,23,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,24,rep,name=enum,proto3" json:"enum,omitempty"` + Type string `protobuf:"bytes,25,opt,name=type,proto3" json:"type,omitempty"` + AllOf []*SchemaOrReference `protobuf:"bytes,26,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + OneOf []*SchemaOrReference `protobuf:"bytes,27,rep,name=one_of,json=oneOf,proto3" json:"one_of,omitempty"` + AnyOf []*SchemaOrReference `protobuf:"bytes,28,rep,name=any_of,json=anyOf,proto3" json:"any_of,omitempty"` + Not *Schema `protobuf:"bytes,29,opt,name=not,proto3" json:"not,omitempty"` + Items *ItemsItem `protobuf:"bytes,30,opt,name=items,proto3" json:"items,omitempty"` + Properties *Properties `protobuf:"bytes,31,opt,name=properties,proto3" json:"properties,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,32,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Default *DefaultType 
`protobuf:"bytes,33,opt,name=default,proto3" json:"default,omitempty"` + Description string `protobuf:"bytes,34,opt,name=description,proto3" json:"description,omitempty"` + Format string `protobuf:"bytes,35,opt,name=format,proto3" json:"format,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,36,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{63} +} + +func (x *Schema) GetNullable() bool { + if x != nil { + return x.Nullable + } + return false +} + +func (x *Schema) GetDiscriminator() *Discriminator { + if x != nil { + return x.Discriminator + } + return nil +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetWriteOnly() bool { + if x != nil { + return x.WriteOnly + } + return false +} + +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml + } + return nil +} + +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Schema) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *Schema) GetRequired() []string { + if x != nil { 
+ return x.Required + } + return nil +} + +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Schema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Schema) GetAllOf() []*SchemaOrReference { + if x != nil { + return x.AllOf + } + return nil +} + +func (x *Schema) GetOneOf() []*SchemaOrReference { + if x != nil { + return x.OneOf + } + return nil +} + +func (x *Schema) GetAnyOf() []*SchemaOrReference { + if x != nil { + return x.AnyOf + } + return nil +} + +func (x *Schema) GetNot() *Schema { + if x != nil { + return x.Not + } + return nil +} + +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items + } + return nil +} + +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties + } + return nil +} + +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +func (x *Schema) GetDefault() *DefaultType { + if x != nil { + return x.Default + } + return nil +} + +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Schema) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type SchemaOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SchemaOrReference_Schema + // *SchemaOrReference_Reference + Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SchemaOrReference) Reset() { + *x = SchemaOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemaOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaOrReference) ProtoMessage() {} + +func (x *SchemaOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaOrReference.ProtoReflect.Descriptor instead. 
+func (*SchemaOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{64} +} + +func (m *SchemaOrReference) GetOneof() isSchemaOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SchemaOrReference) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaOrReference_Schema); ok { + return x.Schema + } + return nil +} + +func (x *SchemaOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*SchemaOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isSchemaOrReference_Oneof interface { + isSchemaOrReference_Oneof() +} + +type SchemaOrReference_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type SchemaOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*SchemaOrReference_Schema) isSchemaOrReference_Oneof() {} + +func (*SchemaOrReference_Reference) isSchemaOrReference_Oneof() {} + +type SchemasOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SchemasOrReferences) Reset() { + *x = SchemasOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemasOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemasOrReferences) ProtoMessage() {} + +func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemasOrReferences.ProtoReflect.Descriptor instead. +func (*SchemasOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{65} +} + +func (x *SchemasOrReferences) GetAdditionalProperties() []*NamedSchemaOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. 
+type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{66} +} + +func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE. 
+type SecurityScheme struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,4,opt,name=in,proto3" json:"in,omitempty"` + Scheme string `protobuf:"bytes,5,opt,name=scheme,proto3" json:"scheme,omitempty"` + BearerFormat string `protobuf:"bytes,6,opt,name=bearer_format,json=bearerFormat,proto3" json:"bearer_format,omitempty"` + Flows *OauthFlows `protobuf:"bytes,7,opt,name=flows,proto3" json:"flows,omitempty"` + OpenIdConnectUrl string `protobuf:"bytes,8,opt,name=open_id_connect_url,json=openIdConnectUrl,proto3" json:"open_id_connect_url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityScheme.ProtoReflect.Descriptor instead. 
+func (*SecurityScheme) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{67} +} + +func (x *SecurityScheme) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecurityScheme) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *SecurityScheme) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *SecurityScheme) GetBearerFormat() string { + if x != nil { + return x.BearerFormat + } + return "" +} + +func (x *SecurityScheme) GetFlows() *OauthFlows { + if x != nil { + return x.Flows + } + return nil +} + +func (x *SecurityScheme) GetOpenIdConnectUrl() string { + if x != nil { + return x.OpenIdConnectUrl + } + return "" +} + +func (x *SecurityScheme) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type SecuritySchemeOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SecuritySchemeOrReference_SecurityScheme + // *SecuritySchemeOrReference_Reference + Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SecuritySchemeOrReference) Reset() { + *x = SecuritySchemeOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecuritySchemeOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecuritySchemeOrReference) ProtoMessage() {} + +func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecuritySchemeOrReference.ProtoReflect.Descriptor instead. 
+func (*SecuritySchemeOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{68} +} + +func (m *SecuritySchemeOrReference) GetOneof() isSecuritySchemeOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SecuritySchemeOrReference) GetSecurityScheme() *SecurityScheme { + if x, ok := x.GetOneof().(*SecuritySchemeOrReference_SecurityScheme); ok { + return x.SecurityScheme + } + return nil +} + +func (x *SecuritySchemeOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*SecuritySchemeOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isSecuritySchemeOrReference_Oneof interface { + isSecuritySchemeOrReference_Oneof() +} + +type SecuritySchemeOrReference_SecurityScheme struct { + SecurityScheme *SecurityScheme `protobuf:"bytes,1,opt,name=security_scheme,json=securityScheme,proto3,oneof"` +} + +type SecuritySchemeOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*SecuritySchemeOrReference_SecurityScheme) isSecuritySchemeOrReference_Oneof() {} + +func (*SecuritySchemeOrReference_Reference) isSecuritySchemeOrReference_Oneof() {} + +type SecuritySchemesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecuritySchemeOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecuritySchemesOrReferences) Reset() { + *x = SecuritySchemesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecuritySchemesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecuritySchemesOrReferences) ProtoMessage() {} + +func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecuritySchemesOrReferences.ProtoReflect.Descriptor instead. +func (*SecuritySchemesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{69} +} + +func (x *SecuritySchemesOrReferences) GetAdditionalProperties() []*NamedSecuritySchemeOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// An object representing a Server. 
+type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Variables *ServerVariables `protobuf:"bytes,3,opt,name=variables,proto3" json:"variables,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. +func (*Server) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{70} +} + +func (x *Server) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Server) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Server) GetVariables() *ServerVariables { + if x != nil { + return x.Variables + } + return nil +} + +func (x *Server) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// An object representing a Server Variable for server URL template substitution. +type ServerVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enum []string `protobuf:"bytes,1,rep,name=enum,proto3" json:"enum,omitempty"` + Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *ServerVariable) Reset() { + *x = ServerVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerVariable) ProtoMessage() {} + +func (x *ServerVariable) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerVariable.ProtoReflect.Descriptor instead. 
+func (*ServerVariable) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{71} +} + +func (x *ServerVariable) GetEnum() []string { + if x != nil { + return x.Enum + } + return nil +} + +func (x *ServerVariable) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *ServerVariable) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ServerVariable) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ServerVariables struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedServerVariable `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ServerVariables) Reset() { + *x = ServerVariables{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerVariables) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerVariables) ProtoMessage() {} + +func (x *ServerVariables) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerVariables.ProtoReflect.Descriptor instead. +func (*ServerVariables) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{72} +} + +func (x *ServerVariables) GetAdditionalProperties() []*NamedServerVariable { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Any property starting with x- is valid. +type SpecificationExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SpecificationExtension_Number + // *SpecificationExtension_Boolean + // *SpecificationExtension_String_ + Oneof isSpecificationExtension_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SpecificationExtension) Reset() { + *x = SpecificationExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpecificationExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpecificationExtension) ProtoMessage() {} + +func (x *SpecificationExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpecificationExtension.ProtoReflect.Descriptor instead. 
+func (*SpecificationExtension) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{73} +} + +func (m *SpecificationExtension) GetOneof() isSpecificationExtension_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SpecificationExtension) GetNumber() float64 { + if x, ok := x.GetOneof().(*SpecificationExtension_Number); ok { + return x.Number + } + return 0 +} + +func (x *SpecificationExtension) GetBoolean() bool { + if x, ok := x.GetOneof().(*SpecificationExtension_Boolean); ok { + return x.Boolean + } + return false +} + +func (x *SpecificationExtension) GetString_() string { + if x, ok := x.GetOneof().(*SpecificationExtension_String_); ok { + return x.String_ + } + return "" +} + +type isSpecificationExtension_Oneof interface { + isSpecificationExtension_Oneof() +} + +type SpecificationExtension_Number struct { + Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` +} + +type SpecificationExtension_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +type SpecificationExtension_String_ struct { + String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` +} + +func (*SpecificationExtension_Number) isSpecificationExtension_Oneof() {} + +func (*SpecificationExtension_Boolean) isSpecificationExtension_Oneof() {} + +func (*SpecificationExtension_String_) isSpecificationExtension_Oneof() {} + +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringArray) ProtoMessage() {} + +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. 
+func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{74} +} + +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +type Strings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Strings) Reset() { + *x = Strings{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Strings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Strings) ProtoMessage() {} + +func (x *Strings) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Strings.ProtoReflect.Descriptor instead. +func (*Strings) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{75} +} + +func (x *Strings) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
+func (*Tag) Descriptor() ([]byte, []int) {
+	return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{76}
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocs {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetSpecificationExtension() []*NamedAny {
+	if x != nil {
+		return x.SpecificationExtension
+	}
+	return nil
+}
+
+// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.
+type Xml struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name                   string      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Namespace              string      `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	Prefix                 string      `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
+	Attribute              bool        `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
+	Wrapped                bool        `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"`
+	SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"`
+}
+
+func (x *Xml) Reset() {
+	*x = Xml{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Xml) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Xml) ProtoMessage() {}
+
+func (x *Xml) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Xml.ProtoReflect.Descriptor instead.
+func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{77} +} + +func (x *Xml) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute + } + return false +} + +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped + } + return false +} + +func (x *Xml) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +var File_openapiv3_OpenAPIv3_proto protoreflect.FileDescriptor + +var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x4f, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0x79, 0x0a, 0x0f, + 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x23, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x03, 0x61, 0x6e, 0x79, 0x12, 0x38, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x07, + 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x43, 0x61, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x13, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x63, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x35, + 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x72, + 0x0a, 0x15, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xac, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x39, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x3f, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x42, 0x0a, + 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 
0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, + 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, + 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0d, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0f, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x05, + 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x0a, 0x0b, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x18, 0x0a, + 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x22, 0xb2, 0x01, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xc9, 0x03, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x04, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 
0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x23, + 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x8e, 0x02, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x39, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x79, + 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x09, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, + 0xe2, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 
0x18, 0x0a, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x09, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x70, 0x0a, 0x14, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, + 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x04, 0x0a, 0x06, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, + 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x09, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6e, 0x0a, 0x13, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc9, 0x02, 0x0a, + 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, + 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5a, 0x0a, 0x09, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x4d, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x22, 0x7e, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe8, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x66, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, + 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x79, 0x0a, 0x0f, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6a, 0x0a, 0x11, 0x4c, 0x69, + 0x6e, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xad, 0x02, 0x0a, 0x09, 0x4d, 0x65, 0x64, 0x69, 0x61, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, 0x0a, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, + 
0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, 0x0a, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x45, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x17, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5d, 0x0a, 0x14, + 0x4e, 0x61, 0x6d, 0x65, 
0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x67, + 0x0a, 0x19, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x6b, 0x0a, 0x1b, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x71, 0x0a, 0x1e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x5b, 0x0a, 0x13, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x37, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xf2, 0x01, 0x0a, 0x09, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 
0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x02, 0x0a, 0x0a, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, + 0x6f, 0x77, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x44, 0x0a, 0x12, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x11, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, + 0x44, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, + 0x6f, 0x77, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x53, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x05, 0x0a, 0x09, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 
0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x09, 0x63, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 
0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xb1, 0x04, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x74, 0x79, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, 0x0a, 0x07, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8d, 0x01, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x16, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, + 0x12, 0x5a, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xfa, 0x04, 0x0a, + 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, + 0x74, 0x12, 0x27, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, + 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, + 0x12, 0x2b, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2b, 0x0a, + 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x65, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x1e, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, + 
0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x22, 0x79, 0x0a, 0x19, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x6f, 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x96, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3c, 0x0a, + 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52, 0x0b, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9d, 0x02, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 
0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x4d, 0x0a, 0x17, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, + 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xef, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x58, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x15, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xaf, 0x0b, + 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x75, 0x6c, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x75, 0x6c, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, + 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, + 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, + 0x6c, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x21, 0x0a, 0x03, 0x78, 0x6d, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x58, 0x6d, 0x6c, 0x52, + 0x03, 0x78, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1e, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 
0x65, 0x78, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, + 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x18, 0x17, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x61, + 0x6c, 0x6c, 0x5f, 0x6f, 0x66, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x34, 0x0a, 0x06, 0x6f, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x1b, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x05, 0x6f, 0x6e, 0x65, 0x4f, 0x66, 0x12, 0x34, 0x0a, 0x06, 0x61, 0x6e, 0x79, 0x5f, 0x6f, + 0x66, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 
0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x61, 0x6e, 0x79, 0x4f, 0x66, 0x12, 0x24, 0x0a, + 0x03, 0x6e, 0x6f, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x03, + 0x6e, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x1e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x1f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x81, 0x01, 0x0a, 0x11, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0x6e, 0x0a, 0x13, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 
0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x15, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x22, 0x68, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xd3, 0x02, + 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, + 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2c, 0x0a, 0x05, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x69, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x49, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x55, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, 
0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0xaf, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 
0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x67, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x71, 0x0a, 0x16, + 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x06, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x57, 0x0a, 0x07, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc9, 0x01, + 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 
0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd6, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, + 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv3_OpenAPIv3_proto_rawDescOnce sync.Once + file_openapiv3_OpenAPIv3_proto_rawDescData = file_openapiv3_OpenAPIv3_proto_rawDesc +) + +func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte { + file_openapiv3_OpenAPIv3_proto_rawDescOnce.Do(func() { + file_openapiv3_OpenAPIv3_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv3_OpenAPIv3_proto_rawDescData) + }) + return file_openapiv3_OpenAPIv3_proto_rawDescData +} + +var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78) +var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v3.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v3.Any + (*AnyOrExpression)(nil), // 2: openapi.v3.AnyOrExpression + (*Callback)(nil), // 3: openapi.v3.Callback + (*CallbackOrReference)(nil), // 4: openapi.v3.CallbackOrReference + (*CallbacksOrReferences)(nil), // 5: openapi.v3.CallbacksOrReferences + (*Components)(nil), // 6: openapi.v3.Components + (*Contact)(nil), // 7: openapi.v3.Contact + (*DefaultType)(nil), // 8: openapi.v3.DefaultType + (*Discriminator)(nil), // 9: openapi.v3.Discriminator + (*Document)(nil), // 10: openapi.v3.Document + (*Encoding)(nil), // 11: openapi.v3.Encoding + (*Encodings)(nil), // 12: openapi.v3.Encodings + (*Example)(nil), // 13: openapi.v3.Example + (*ExampleOrReference)(nil), // 14: openapi.v3.ExampleOrReference + (*ExamplesOrReferences)(nil), // 15: openapi.v3.ExamplesOrReferences + (*Expression)(nil), // 16: openapi.v3.Expression + (*ExternalDocs)(nil), // 17: openapi.v3.ExternalDocs + (*Header)(nil), // 18: openapi.v3.Header 
+ (*HeaderOrReference)(nil), // 19: openapi.v3.HeaderOrReference + (*HeadersOrReferences)(nil), // 20: openapi.v3.HeadersOrReferences + (*Info)(nil), // 21: openapi.v3.Info + (*ItemsItem)(nil), // 22: openapi.v3.ItemsItem + (*License)(nil), // 23: openapi.v3.License + (*Link)(nil), // 24: openapi.v3.Link + (*LinkOrReference)(nil), // 25: openapi.v3.LinkOrReference + (*LinksOrReferences)(nil), // 26: openapi.v3.LinksOrReferences + (*MediaType)(nil), // 27: openapi.v3.MediaType + (*MediaTypes)(nil), // 28: openapi.v3.MediaTypes + (*NamedAny)(nil), // 29: openapi.v3.NamedAny + (*NamedCallbackOrReference)(nil), // 30: openapi.v3.NamedCallbackOrReference + (*NamedEncoding)(nil), // 31: openapi.v3.NamedEncoding + (*NamedExampleOrReference)(nil), // 32: openapi.v3.NamedExampleOrReference + (*NamedHeaderOrReference)(nil), // 33: openapi.v3.NamedHeaderOrReference + (*NamedLinkOrReference)(nil), // 34: openapi.v3.NamedLinkOrReference + (*NamedMediaType)(nil), // 35: openapi.v3.NamedMediaType + (*NamedParameterOrReference)(nil), // 36: openapi.v3.NamedParameterOrReference + (*NamedPathItem)(nil), // 37: openapi.v3.NamedPathItem + (*NamedRequestBodyOrReference)(nil), // 38: openapi.v3.NamedRequestBodyOrReference + (*NamedResponseOrReference)(nil), // 39: openapi.v3.NamedResponseOrReference + (*NamedSchemaOrReference)(nil), // 40: openapi.v3.NamedSchemaOrReference + (*NamedSecuritySchemeOrReference)(nil), // 41: openapi.v3.NamedSecuritySchemeOrReference + (*NamedServerVariable)(nil), // 42: openapi.v3.NamedServerVariable + (*NamedString)(nil), // 43: openapi.v3.NamedString + (*NamedStringArray)(nil), // 44: openapi.v3.NamedStringArray + (*OauthFlow)(nil), // 45: openapi.v3.OauthFlow + (*OauthFlows)(nil), // 46: openapi.v3.OauthFlows + (*Object)(nil), // 47: openapi.v3.Object + (*Operation)(nil), // 48: openapi.v3.Operation + (*Parameter)(nil), // 49: openapi.v3.Parameter + (*ParameterOrReference)(nil), // 50: openapi.v3.ParameterOrReference + (*ParametersOrReferences)(nil), // 51: openapi.v3.ParametersOrReferences + (*PathItem)(nil), // 52: openapi.v3.PathItem + (*Paths)(nil), // 53: openapi.v3.Paths + (*Properties)(nil), // 54: openapi.v3.Properties + (*Reference)(nil), // 55: openapi.v3.Reference + (*RequestBodiesOrReferences)(nil), // 56: openapi.v3.RequestBodiesOrReferences + (*RequestBody)(nil), // 57: openapi.v3.RequestBody + (*RequestBodyOrReference)(nil), // 58: openapi.v3.RequestBodyOrReference + (*Response)(nil), // 59: openapi.v3.Response + (*ResponseOrReference)(nil), // 60: openapi.v3.ResponseOrReference + (*Responses)(nil), // 61: openapi.v3.Responses + (*ResponsesOrReferences)(nil), // 62: openapi.v3.ResponsesOrReferences + (*Schema)(nil), // 63: openapi.v3.Schema + (*SchemaOrReference)(nil), // 64: openapi.v3.SchemaOrReference + (*SchemasOrReferences)(nil), // 65: openapi.v3.SchemasOrReferences + (*SecurityRequirement)(nil), // 66: openapi.v3.SecurityRequirement + (*SecurityScheme)(nil), // 67: openapi.v3.SecurityScheme + (*SecuritySchemeOrReference)(nil), // 68: openapi.v3.SecuritySchemeOrReference + (*SecuritySchemesOrReferences)(nil), // 69: openapi.v3.SecuritySchemesOrReferences + (*Server)(nil), // 70: openapi.v3.Server + (*ServerVariable)(nil), // 71: openapi.v3.ServerVariable + (*ServerVariables)(nil), // 72: openapi.v3.ServerVariables + (*SpecificationExtension)(nil), // 73: openapi.v3.SpecificationExtension + (*StringArray)(nil), // 74: openapi.v3.StringArray + (*Strings)(nil), // 75: openapi.v3.Strings + (*Tag)(nil), // 76: openapi.v3.Tag + (*Xml)(nil), // 77: openapi.v3.Xml + 
(*anypb.Any)(nil), // 78: google.protobuf.Any +} +var file_openapiv3_OpenAPIv3_proto_depIdxs = []int32{ + 64, // 0: openapi.v3.AdditionalPropertiesItem.schema_or_reference:type_name -> openapi.v3.SchemaOrReference + 78, // 1: openapi.v3.Any.value:type_name -> google.protobuf.Any + 1, // 2: openapi.v3.AnyOrExpression.any:type_name -> openapi.v3.Any + 16, // 3: openapi.v3.AnyOrExpression.expression:type_name -> openapi.v3.Expression + 37, // 4: openapi.v3.Callback.path:type_name -> openapi.v3.NamedPathItem + 29, // 5: openapi.v3.Callback.specification_extension:type_name -> openapi.v3.NamedAny + 3, // 6: openapi.v3.CallbackOrReference.callback:type_name -> openapi.v3.Callback + 55, // 7: openapi.v3.CallbackOrReference.reference:type_name -> openapi.v3.Reference + 30, // 8: openapi.v3.CallbacksOrReferences.additional_properties:type_name -> openapi.v3.NamedCallbackOrReference + 65, // 9: openapi.v3.Components.schemas:type_name -> openapi.v3.SchemasOrReferences + 62, // 10: openapi.v3.Components.responses:type_name -> openapi.v3.ResponsesOrReferences + 51, // 11: openapi.v3.Components.parameters:type_name -> openapi.v3.ParametersOrReferences + 15, // 12: openapi.v3.Components.examples:type_name -> openapi.v3.ExamplesOrReferences + 56, // 13: openapi.v3.Components.request_bodies:type_name -> openapi.v3.RequestBodiesOrReferences + 20, // 14: openapi.v3.Components.headers:type_name -> openapi.v3.HeadersOrReferences + 69, // 15: openapi.v3.Components.security_schemes:type_name -> openapi.v3.SecuritySchemesOrReferences + 26, // 16: openapi.v3.Components.links:type_name -> openapi.v3.LinksOrReferences + 5, // 17: openapi.v3.Components.callbacks:type_name -> openapi.v3.CallbacksOrReferences + 29, // 18: openapi.v3.Components.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 19: openapi.v3.Contact.specification_extension:type_name -> openapi.v3.NamedAny + 75, // 20: openapi.v3.Discriminator.mapping:type_name -> openapi.v3.Strings + 29, // 21: openapi.v3.Discriminator.specification_extension:type_name -> openapi.v3.NamedAny + 21, // 22: openapi.v3.Document.info:type_name -> openapi.v3.Info + 70, // 23: openapi.v3.Document.servers:type_name -> openapi.v3.Server + 53, // 24: openapi.v3.Document.paths:type_name -> openapi.v3.Paths + 6, // 25: openapi.v3.Document.components:type_name -> openapi.v3.Components + 66, // 26: openapi.v3.Document.security:type_name -> openapi.v3.SecurityRequirement + 76, // 27: openapi.v3.Document.tags:type_name -> openapi.v3.Tag + 17, // 28: openapi.v3.Document.external_docs:type_name -> openapi.v3.ExternalDocs + 29, // 29: openapi.v3.Document.specification_extension:type_name -> openapi.v3.NamedAny + 20, // 30: openapi.v3.Encoding.headers:type_name -> openapi.v3.HeadersOrReferences + 29, // 31: openapi.v3.Encoding.specification_extension:type_name -> openapi.v3.NamedAny + 31, // 32: openapi.v3.Encodings.additional_properties:type_name -> openapi.v3.NamedEncoding + 1, // 33: openapi.v3.Example.value:type_name -> openapi.v3.Any + 29, // 34: openapi.v3.Example.specification_extension:type_name -> openapi.v3.NamedAny + 13, // 35: openapi.v3.ExampleOrReference.example:type_name -> openapi.v3.Example + 55, // 36: openapi.v3.ExampleOrReference.reference:type_name -> openapi.v3.Reference + 32, // 37: openapi.v3.ExamplesOrReferences.additional_properties:type_name -> openapi.v3.NamedExampleOrReference + 29, // 38: openapi.v3.Expression.additional_properties:type_name -> openapi.v3.NamedAny + 29, // 39: openapi.v3.ExternalDocs.specification_extension:type_name -> 
openapi.v3.NamedAny + 64, // 40: openapi.v3.Header.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 41: openapi.v3.Header.example:type_name -> openapi.v3.Any + 15, // 42: openapi.v3.Header.examples:type_name -> openapi.v3.ExamplesOrReferences + 28, // 43: openapi.v3.Header.content:type_name -> openapi.v3.MediaTypes + 29, // 44: openapi.v3.Header.specification_extension:type_name -> openapi.v3.NamedAny + 18, // 45: openapi.v3.HeaderOrReference.header:type_name -> openapi.v3.Header + 55, // 46: openapi.v3.HeaderOrReference.reference:type_name -> openapi.v3.Reference + 33, // 47: openapi.v3.HeadersOrReferences.additional_properties:type_name -> openapi.v3.NamedHeaderOrReference + 7, // 48: openapi.v3.Info.contact:type_name -> openapi.v3.Contact + 23, // 49: openapi.v3.Info.license:type_name -> openapi.v3.License + 29, // 50: openapi.v3.Info.specification_extension:type_name -> openapi.v3.NamedAny + 64, // 51: openapi.v3.ItemsItem.schema_or_reference:type_name -> openapi.v3.SchemaOrReference + 29, // 52: openapi.v3.License.specification_extension:type_name -> openapi.v3.NamedAny + 2, // 53: openapi.v3.Link.parameters:type_name -> openapi.v3.AnyOrExpression + 2, // 54: openapi.v3.Link.request_body:type_name -> openapi.v3.AnyOrExpression + 70, // 55: openapi.v3.Link.server:type_name -> openapi.v3.Server + 29, // 56: openapi.v3.Link.specification_extension:type_name -> openapi.v3.NamedAny + 24, // 57: openapi.v3.LinkOrReference.link:type_name -> openapi.v3.Link + 55, // 58: openapi.v3.LinkOrReference.reference:type_name -> openapi.v3.Reference + 34, // 59: openapi.v3.LinksOrReferences.additional_properties:type_name -> openapi.v3.NamedLinkOrReference + 64, // 60: openapi.v3.MediaType.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 61: openapi.v3.MediaType.example:type_name -> openapi.v3.Any + 15, // 62: openapi.v3.MediaType.examples:type_name -> openapi.v3.ExamplesOrReferences + 12, // 63: openapi.v3.MediaType.encoding:type_name -> openapi.v3.Encodings + 29, // 64: openapi.v3.MediaType.specification_extension:type_name -> openapi.v3.NamedAny + 35, // 65: openapi.v3.MediaTypes.additional_properties:type_name -> openapi.v3.NamedMediaType + 1, // 66: openapi.v3.NamedAny.value:type_name -> openapi.v3.Any + 4, // 67: openapi.v3.NamedCallbackOrReference.value:type_name -> openapi.v3.CallbackOrReference + 11, // 68: openapi.v3.NamedEncoding.value:type_name -> openapi.v3.Encoding + 14, // 69: openapi.v3.NamedExampleOrReference.value:type_name -> openapi.v3.ExampleOrReference + 19, // 70: openapi.v3.NamedHeaderOrReference.value:type_name -> openapi.v3.HeaderOrReference + 25, // 71: openapi.v3.NamedLinkOrReference.value:type_name -> openapi.v3.LinkOrReference + 27, // 72: openapi.v3.NamedMediaType.value:type_name -> openapi.v3.MediaType + 50, // 73: openapi.v3.NamedParameterOrReference.value:type_name -> openapi.v3.ParameterOrReference + 52, // 74: openapi.v3.NamedPathItem.value:type_name -> openapi.v3.PathItem + 58, // 75: openapi.v3.NamedRequestBodyOrReference.value:type_name -> openapi.v3.RequestBodyOrReference + 60, // 76: openapi.v3.NamedResponseOrReference.value:type_name -> openapi.v3.ResponseOrReference + 64, // 77: openapi.v3.NamedSchemaOrReference.value:type_name -> openapi.v3.SchemaOrReference + 68, // 78: openapi.v3.NamedSecuritySchemeOrReference.value:type_name -> openapi.v3.SecuritySchemeOrReference + 71, // 79: openapi.v3.NamedServerVariable.value:type_name -> openapi.v3.ServerVariable + 74, // 80: openapi.v3.NamedStringArray.value:type_name -> openapi.v3.StringArray + 75, 
// 81: openapi.v3.OauthFlow.scopes:type_name -> openapi.v3.Strings + 29, // 82: openapi.v3.OauthFlow.specification_extension:type_name -> openapi.v3.NamedAny + 45, // 83: openapi.v3.OauthFlows.implicit:type_name -> openapi.v3.OauthFlow + 45, // 84: openapi.v3.OauthFlows.password:type_name -> openapi.v3.OauthFlow + 45, // 85: openapi.v3.OauthFlows.client_credentials:type_name -> openapi.v3.OauthFlow + 45, // 86: openapi.v3.OauthFlows.authorization_code:type_name -> openapi.v3.OauthFlow + 29, // 87: openapi.v3.OauthFlows.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 88: openapi.v3.Object.additional_properties:type_name -> openapi.v3.NamedAny + 17, // 89: openapi.v3.Operation.external_docs:type_name -> openapi.v3.ExternalDocs + 50, // 90: openapi.v3.Operation.parameters:type_name -> openapi.v3.ParameterOrReference + 58, // 91: openapi.v3.Operation.request_body:type_name -> openapi.v3.RequestBodyOrReference + 61, // 92: openapi.v3.Operation.responses:type_name -> openapi.v3.Responses + 5, // 93: openapi.v3.Operation.callbacks:type_name -> openapi.v3.CallbacksOrReferences + 66, // 94: openapi.v3.Operation.security:type_name -> openapi.v3.SecurityRequirement + 70, // 95: openapi.v3.Operation.servers:type_name -> openapi.v3.Server + 29, // 96: openapi.v3.Operation.specification_extension:type_name -> openapi.v3.NamedAny + 64, // 97: openapi.v3.Parameter.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 98: openapi.v3.Parameter.example:type_name -> openapi.v3.Any + 15, // 99: openapi.v3.Parameter.examples:type_name -> openapi.v3.ExamplesOrReferences + 28, // 100: openapi.v3.Parameter.content:type_name -> openapi.v3.MediaTypes + 29, // 101: openapi.v3.Parameter.specification_extension:type_name -> openapi.v3.NamedAny + 49, // 102: openapi.v3.ParameterOrReference.parameter:type_name -> openapi.v3.Parameter + 55, // 103: openapi.v3.ParameterOrReference.reference:type_name -> openapi.v3.Reference + 36, // 104: openapi.v3.ParametersOrReferences.additional_properties:type_name -> openapi.v3.NamedParameterOrReference + 48, // 105: openapi.v3.PathItem.get:type_name -> openapi.v3.Operation + 48, // 106: openapi.v3.PathItem.put:type_name -> openapi.v3.Operation + 48, // 107: openapi.v3.PathItem.post:type_name -> openapi.v3.Operation + 48, // 108: openapi.v3.PathItem.delete:type_name -> openapi.v3.Operation + 48, // 109: openapi.v3.PathItem.options:type_name -> openapi.v3.Operation + 48, // 110: openapi.v3.PathItem.head:type_name -> openapi.v3.Operation + 48, // 111: openapi.v3.PathItem.patch:type_name -> openapi.v3.Operation + 48, // 112: openapi.v3.PathItem.trace:type_name -> openapi.v3.Operation + 70, // 113: openapi.v3.PathItem.servers:type_name -> openapi.v3.Server + 50, // 114: openapi.v3.PathItem.parameters:type_name -> openapi.v3.ParameterOrReference + 29, // 115: openapi.v3.PathItem.specification_extension:type_name -> openapi.v3.NamedAny + 37, // 116: openapi.v3.Paths.path:type_name -> openapi.v3.NamedPathItem + 29, // 117: openapi.v3.Paths.specification_extension:type_name -> openapi.v3.NamedAny + 40, // 118: openapi.v3.Properties.additional_properties:type_name -> openapi.v3.NamedSchemaOrReference + 38, // 119: openapi.v3.RequestBodiesOrReferences.additional_properties:type_name -> openapi.v3.NamedRequestBodyOrReference + 28, // 120: openapi.v3.RequestBody.content:type_name -> openapi.v3.MediaTypes + 29, // 121: openapi.v3.RequestBody.specification_extension:type_name -> openapi.v3.NamedAny + 57, // 122: openapi.v3.RequestBodyOrReference.request_body:type_name -> 
openapi.v3.RequestBody + 55, // 123: openapi.v3.RequestBodyOrReference.reference:type_name -> openapi.v3.Reference + 20, // 124: openapi.v3.Response.headers:type_name -> openapi.v3.HeadersOrReferences + 28, // 125: openapi.v3.Response.content:type_name -> openapi.v3.MediaTypes + 26, // 126: openapi.v3.Response.links:type_name -> openapi.v3.LinksOrReferences + 29, // 127: openapi.v3.Response.specification_extension:type_name -> openapi.v3.NamedAny + 59, // 128: openapi.v3.ResponseOrReference.response:type_name -> openapi.v3.Response + 55, // 129: openapi.v3.ResponseOrReference.reference:type_name -> openapi.v3.Reference + 60, // 130: openapi.v3.Responses.default:type_name -> openapi.v3.ResponseOrReference + 39, // 131: openapi.v3.Responses.response_or_reference:type_name -> openapi.v3.NamedResponseOrReference + 29, // 132: openapi.v3.Responses.specification_extension:type_name -> openapi.v3.NamedAny + 39, // 133: openapi.v3.ResponsesOrReferences.additional_properties:type_name -> openapi.v3.NamedResponseOrReference + 9, // 134: openapi.v3.Schema.discriminator:type_name -> openapi.v3.Discriminator + 77, // 135: openapi.v3.Schema.xml:type_name -> openapi.v3.Xml + 17, // 136: openapi.v3.Schema.external_docs:type_name -> openapi.v3.ExternalDocs + 1, // 137: openapi.v3.Schema.example:type_name -> openapi.v3.Any + 1, // 138: openapi.v3.Schema.enum:type_name -> openapi.v3.Any + 64, // 139: openapi.v3.Schema.all_of:type_name -> openapi.v3.SchemaOrReference + 64, // 140: openapi.v3.Schema.one_of:type_name -> openapi.v3.SchemaOrReference + 64, // 141: openapi.v3.Schema.any_of:type_name -> openapi.v3.SchemaOrReference + 63, // 142: openapi.v3.Schema.not:type_name -> openapi.v3.Schema + 22, // 143: openapi.v3.Schema.items:type_name -> openapi.v3.ItemsItem + 54, // 144: openapi.v3.Schema.properties:type_name -> openapi.v3.Properties + 0, // 145: openapi.v3.Schema.additional_properties:type_name -> openapi.v3.AdditionalPropertiesItem + 8, // 146: openapi.v3.Schema.default:type_name -> openapi.v3.DefaultType + 29, // 147: openapi.v3.Schema.specification_extension:type_name -> openapi.v3.NamedAny + 63, // 148: openapi.v3.SchemaOrReference.schema:type_name -> openapi.v3.Schema + 55, // 149: openapi.v3.SchemaOrReference.reference:type_name -> openapi.v3.Reference + 40, // 150: openapi.v3.SchemasOrReferences.additional_properties:type_name -> openapi.v3.NamedSchemaOrReference + 44, // 151: openapi.v3.SecurityRequirement.additional_properties:type_name -> openapi.v3.NamedStringArray + 46, // 152: openapi.v3.SecurityScheme.flows:type_name -> openapi.v3.OauthFlows + 29, // 153: openapi.v3.SecurityScheme.specification_extension:type_name -> openapi.v3.NamedAny + 67, // 154: openapi.v3.SecuritySchemeOrReference.security_scheme:type_name -> openapi.v3.SecurityScheme + 55, // 155: openapi.v3.SecuritySchemeOrReference.reference:type_name -> openapi.v3.Reference + 41, // 156: openapi.v3.SecuritySchemesOrReferences.additional_properties:type_name -> openapi.v3.NamedSecuritySchemeOrReference + 72, // 157: openapi.v3.Server.variables:type_name -> openapi.v3.ServerVariables + 29, // 158: openapi.v3.Server.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 159: openapi.v3.ServerVariable.specification_extension:type_name -> openapi.v3.NamedAny + 42, // 160: openapi.v3.ServerVariables.additional_properties:type_name -> openapi.v3.NamedServerVariable + 43, // 161: openapi.v3.Strings.additional_properties:type_name -> openapi.v3.NamedString + 17, // 162: openapi.v3.Tag.external_docs:type_name -> 
openapi.v3.ExternalDocs + 29, // 163: openapi.v3.Tag.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 164: openapi.v3.Xml.specification_extension:type_name -> openapi.v3.NamedAny + 165, // [165:165] is the sub-list for method output_type + 165, // [165:165] is the sub-list for method input_type + 165, // [165:165] is the sub-list for extension type_name + 165, // [165:165] is the sub-list for extension extendee + 0, // [0:165] is the sub-list for field type_name +} + +func init() { file_openapiv3_OpenAPIv3_proto_init() } +func file_openapiv3_OpenAPIv3_proto_init() { + if File_openapiv3_OpenAPIv3_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyOrExpression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Callback); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallbackOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallbacksOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Components); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DefaultType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Discriminator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Encodings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Example); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExampleOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExamplesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadersOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Link); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinkOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinksOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MediaType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MediaTypes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedCallbackOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedEncoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedExampleOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeaderOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedLinkOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedMediaType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameterOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedRequestBodyOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchemaOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecuritySchemeOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedServerVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OauthFlow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OauthFlows); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBodiesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBody); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBodyOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponsesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*SchemaOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemasOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityScheme); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecuritySchemeOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecuritySchemesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerVariables); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpecificationExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Strings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_SchemaOrReference)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*AnyOrExpression_Any)(nil), + (*AnyOrExpression_Expression)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*CallbackOrReference_Callback)(nil), + (*CallbackOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*DefaultType_Number)(nil), + (*DefaultType_Boolean)(nil), + (*DefaultType_String_)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*ExampleOrReference_Example)(nil), + (*ExampleOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*HeaderOrReference_Header)(nil), + (*HeaderOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{ + (*LinkOrReference_Link)(nil), + (*LinkOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{ + (*ParameterOrReference_Parameter)(nil), + (*ParameterOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{ + (*RequestBodyOrReference_RequestBody)(nil), + (*RequestBodyOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{ + (*ResponseOrReference_Response)(nil), + (*ResponseOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{ + (*SchemaOrReference_Schema)(nil), + (*SchemaOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{ + (*SecuritySchemeOrReference_SecurityScheme)(nil), + (*SecuritySchemeOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{ + (*SpecificationExtension_Number)(nil), + (*SpecificationExtension_Boolean)(nil), + (*SpecificationExtension_String_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_OpenAPIv3_proto_rawDesc, + NumEnums: 0, + NumMessages: 78, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv3_OpenAPIv3_proto_goTypes, + DependencyIndexes: file_openapiv3_OpenAPIv3_proto_depIdxs, + MessageInfos: file_openapiv3_OpenAPIv3_proto_msgTypes, + }.Build() + File_openapiv3_OpenAPIv3_proto = out.File + file_openapiv3_OpenAPIv3_proto_rawDesc = nil + file_openapiv3_OpenAPIv3_proto_goTypes = nil + file_openapiv3_OpenAPIv3_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.proto new file mode 100644 index 000000000..cb4b133f9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/OpenAPIv3.proto @@ -0,0 +1,670 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; + +message AdditionalPropertiesItem { + oneof oneof { + SchemaOrReference schema_or_reference = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message AnyOrExpression { + oneof oneof { + Any any = 1; + Expression expression = 2; + } +} + +// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. +message Callback { + repeated NamedPathItem path = 1; + repeated NamedAny specification_extension = 2; +} + +message CallbackOrReference { + oneof oneof { + Callback callback = 1; + Reference reference = 2; + } +} + +message CallbacksOrReferences { + repeated NamedCallbackOrReference additional_properties = 1; +} + +// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. +message Components { + SchemasOrReferences schemas = 1; + ResponsesOrReferences responses = 2; + ParametersOrReferences parameters = 3; + ExamplesOrReferences examples = 4; + RequestBodiesOrReferences request_bodies = 5; + HeadersOrReferences headers = 6; + SecuritySchemesOrReferences security_schemes = 7; + LinksOrReferences links = 8; + CallbacksOrReferences callbacks = 9; + repeated NamedAny specification_extension = 10; +} + +// Contact information for the exposed API. 
+message Contact { + string name = 1; + string url = 2; + string email = 3; + repeated NamedAny specification_extension = 4; +} + +message DefaultType { + oneof oneof { + double number = 1; + bool boolean = 2; + string string = 3; + } +} + +// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. +message Discriminator { + string property_name = 1; + Strings mapping = 2; + repeated NamedAny specification_extension = 3; +} + +message Document { + string openapi = 1; + Info info = 2; + repeated Server servers = 3; + Paths paths = 4; + Components components = 5; + repeated SecurityRequirement security = 6; + repeated Tag tags = 7; + ExternalDocs external_docs = 8; + repeated NamedAny specification_extension = 9; +} + +// A single encoding definition applied to a single schema property. +message Encoding { + string content_type = 1; + HeadersOrReferences headers = 2; + string style = 3; + bool explode = 4; + bool allow_reserved = 5; + repeated NamedAny specification_extension = 6; +} + +message Encodings { + repeated NamedEncoding additional_properties = 1; +} + +message Example { + string summary = 1; + string description = 2; + Any value = 3; + string external_value = 4; + repeated NamedAny specification_extension = 5; +} + +message ExampleOrReference { + oneof oneof { + Example example = 1; + Reference reference = 2; + } +} + +message ExamplesOrReferences { + repeated NamedExampleOrReference additional_properties = 1; +} + +message Expression { + repeated NamedAny additional_properties = 1; +} + +// Allows referencing an external resource for extended documentation. +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny specification_extension = 3; +} + +// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). +message Header { + string description = 1; + bool required = 2; + bool deprecated = 3; + bool allow_empty_value = 4; + string style = 5; + bool explode = 6; + bool allow_reserved = 7; + SchemaOrReference schema = 8; + Any example = 9; + ExamplesOrReferences examples = 10; + MediaTypes content = 11; + repeated NamedAny specification_extension = 12; +} + +message HeaderOrReference { + oneof oneof { + Header header = 1; + Reference reference = 2; + } +} + +message HeadersOrReferences { + repeated NamedHeaderOrReference additional_properties = 1; +} + +// The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. +message Info { + string title = 1; + string description = 2; + string terms_of_service = 3; + Contact contact = 4; + License license = 5; + string version = 6; + repeated NamedAny specification_extension = 7; + string summary = 8; +} + +message ItemsItem { + repeated SchemaOrReference schema_or_reference = 1; +} + +// License information for the exposed API. 
+message License { + string name = 1; + string url = 2; + repeated NamedAny specification_extension = 3; +} + +// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. +message Link { + string operation_ref = 1; + string operation_id = 2; + AnyOrExpression parameters = 3; + AnyOrExpression request_body = 4; + string description = 5; + Server server = 6; + repeated NamedAny specification_extension = 7; +} + +message LinkOrReference { + oneof oneof { + Link link = 1; + Reference reference = 2; + } +} + +message LinksOrReferences { + repeated NamedLinkOrReference additional_properties = 1; +} + +// Each Media Type Object provides schema and examples for the media type identified by its key. +message MediaType { + SchemaOrReference schema = 1; + Any example = 2; + ExamplesOrReferences examples = 3; + Encodings encoding = 4; + repeated NamedAny specification_extension = 5; +} + +message MediaTypes { + repeated NamedMediaType additional_properties = 1; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. +message NamedCallbackOrReference { + // Map key + string name = 1; + // Mapped value + CallbackOrReference value = 2; +} + +// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. +message NamedEncoding { + // Map key + string name = 1; + // Mapped value + Encoding value = 2; +} + +// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. +message NamedExampleOrReference { + // Map key + string name = 1; + // Mapped value + ExampleOrReference value = 2; +} + +// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. +message NamedHeaderOrReference { + // Map key + string name = 1; + // Mapped value + HeaderOrReference value = 2; +} + +// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. +message NamedLinkOrReference { + // Map key + string name = 1; + // Mapped value + LinkOrReference value = 2; +} + +// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. +message NamedMediaType { + // Map key + string name = 1; + // Mapped value + MediaType value = 2; +} + +// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. +message NamedParameterOrReference { + // Map key + string name = 1; + // Mapped value + ParameterOrReference value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. +message NamedRequestBodyOrReference { + // Map key + string name = 1; + // Mapped value + RequestBodyOrReference value = 2; +} + +// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. +message NamedResponseOrReference { + // Map key + string name = 1; + // Mapped value + ResponseOrReference value = 2; +} + +// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. +message NamedSchemaOrReference { + // Map key + string name = 1; + // Mapped value + SchemaOrReference value = 2; +} + +// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. +message NamedSecuritySchemeOrReference { + // Map key + string name = 1; + // Mapped value + SecuritySchemeOrReference value = 2; +} + +// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. +message NamedServerVariable { + // Map key + string name = 1; + // Mapped value + ServerVariable value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +// Configuration details for a supported OAuth Flow +message OauthFlow { + string authorization_url = 1; + string token_url = 2; + string refresh_url = 3; + Strings scopes = 4; + repeated NamedAny specification_extension = 5; +} + +// Allows configuration of the supported OAuth Flows. +message OauthFlows { + OauthFlow implicit = 1; + OauthFlow password = 2; + OauthFlow client_credentials = 3; + OauthFlow authorization_code = 4; + repeated NamedAny specification_extension = 5; +} + +message Object { + repeated NamedAny additional_properties = 1; +} + +// Describes a single API operation on a path. +message Operation { + repeated string tags = 1; + string summary = 2; + string description = 3; + ExternalDocs external_docs = 4; + string operation_id = 5; + repeated ParameterOrReference parameters = 6; + RequestBodyOrReference request_body = 7; + Responses responses = 8; + CallbacksOrReferences callbacks = 9; + bool deprecated = 10; + repeated SecurityRequirement security = 11; + repeated Server servers = 12; + repeated NamedAny specification_extension = 13; +} + +// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. 
+message Parameter { + string name = 1; + string in = 2; + string description = 3; + bool required = 4; + bool deprecated = 5; + bool allow_empty_value = 6; + string style = 7; + bool explode = 8; + bool allow_reserved = 9; + SchemaOrReference schema = 10; + Any example = 11; + ExamplesOrReferences examples = 12; + MediaTypes content = 13; + repeated NamedAny specification_extension = 14; +} + +message ParameterOrReference { + oneof oneof { + Parameter parameter = 1; + Reference reference = 2; + } +} + +message ParametersOrReferences { + repeated NamedParameterOrReference additional_properties = 1; +} + +// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. +message PathItem { + string _ref = 1; + string summary = 2; + string description = 3; + Operation get = 4; + Operation put = 5; + Operation post = 6; + Operation delete = 7; + Operation options = 8; + Operation head = 9; + Operation patch = 10; + Operation trace = 11; + repeated Server servers = 12; + repeated ParameterOrReference parameters = 13; + repeated NamedAny specification_extension = 14; +} + +// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. +message Paths { + repeated NamedPathItem path = 1; + repeated NamedAny specification_extension = 2; +} + +message Properties { + repeated NamedSchemaOrReference additional_properties = 1; +} + +// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. +message Reference { + string _ref = 1; +} + +message RequestBodiesOrReferences { + repeated NamedRequestBodyOrReference additional_properties = 1; +} + +// Describes a single request body. +message RequestBody { + string description = 1; + MediaTypes content = 2; + bool required = 3; + repeated NamedAny specification_extension = 4; +} + +message RequestBodyOrReference { + oneof oneof { + RequestBody request_body = 1; + Reference reference = 2; + } +} + +// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. +message Response { + string description = 1; + HeadersOrReferences headers = 2; + MediaTypes content = 3; + LinksOrReferences links = 4; + repeated NamedAny specification_extension = 5; +} + +message ResponseOrReference { + oneof oneof { + Response response = 1; + Reference reference = 2; + } +} + +// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. 
+message Responses { + ResponseOrReference default = 1; + repeated NamedResponseOrReference response_or_reference = 2; + repeated NamedAny specification_extension = 3; +} + +message ResponsesOrReferences { + repeated NamedResponseOrReference additional_properties = 1; +} + +// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. +message Schema { + bool nullable = 1; + Discriminator discriminator = 2; + bool read_only = 3; + bool write_only = 4; + Xml xml = 5; + ExternalDocs external_docs = 6; + Any example = 7; + bool deprecated = 8; + string title = 9; + double multiple_of = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + int64 max_properties = 21; + int64 min_properties = 22; + repeated string required = 23; + repeated Any enum = 24; + string type = 25; + repeated SchemaOrReference all_of = 26; + repeated SchemaOrReference one_of = 27; + repeated SchemaOrReference any_of = 28; + Schema not = 29; + ItemsItem items = 30; + Properties properties = 31; + AdditionalPropertiesItem additional_properties = 32; + DefaultType default = 33; + string description = 34; + string format = 35; + repeated NamedAny specification_extension = 36; +} + +message SchemaOrReference { + oneof oneof { + Schema schema = 1; + Reference reference = 2; + } +} + +message SchemasOrReferences { + repeated NamedSchemaOrReference additional_properties = 1; +} + +// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE. 
+message SecurityScheme { + string type = 1; + string description = 2; + string name = 3; + string in = 4; + string scheme = 5; + string bearer_format = 6; + OauthFlows flows = 7; + string open_id_connect_url = 8; + repeated NamedAny specification_extension = 9; +} + +message SecuritySchemeOrReference { + oneof oneof { + SecurityScheme security_scheme = 1; + Reference reference = 2; + } +} + +message SecuritySchemesOrReferences { + repeated NamedSecuritySchemeOrReference additional_properties = 1; +} + +// An object representing a Server. +message Server { + string url = 1; + string description = 2; + ServerVariables variables = 3; + repeated NamedAny specification_extension = 4; +} + +// An object representing a Server Variable for server URL template substitution. +message ServerVariable { + repeated string enum = 1; + string default = 2; + string description = 3; + repeated NamedAny specification_extension = 4; +} + +message ServerVariables { + repeated NamedServerVariable additional_properties = 1; +} + +// Any property starting with x- is valid. +message SpecificationExtension { + oneof oneof { + double number = 1; + bool boolean = 2; + string string = 3; + } +} + +message StringArray { + repeated string value = 1; +} + +message Strings { + repeated NamedString additional_properties = 1; +} + +// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny specification_extension = 4; +} + +// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior. +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny specification_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/README.md b/vendor/github.com/googleapis/gnostic/openapiv3/README.md new file mode 100644 index 000000000..5ee12d92e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/README.md @@ -0,0 +1,21 @@ +# OpenAPI v3 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v3. + +Gnostic applications and plugins can use OpenAPIv3.proto to generate Protocol +Buffer support code for their preferred languages. + +OpenAPIv3.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv3.proto. + +OpenAPIv3.proto and OpenAPIv3.go are generated by the Gnostic compiler +generator, and OpenAPIv3.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. + +openapi-3.1.json is a JSON schema for OpenAPI 3.1 that is automatically +generated from the OpenAPI 3.1 specification. It is not an official JSON Schema +for OpenAPI. + +The schema-generator directory contains support code which generates +openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). 
diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/document.go b/vendor/github.com/googleapis/gnostic/openapiv3/document.go new file mode 100644 index 000000000..999f7fd29 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/document.go @@ -0,0 +1,41 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v3 + +import ( + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v3" +) + +// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. +func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + root := info.Content[0] + return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) +} + +// YAMLValue produces a serialized YAML representation of the document. +func (d *Document) YAMLValue(comment string) ([]byte, error) { + rawInfo := d.ToRawInfo() + rawInfo = &yaml.Node{ + Kind: yaml.DocumentNode, + Content: []*yaml.Node{rawInfo}, + HeadComment: comment, + } + return yaml.Marshal(rawInfo) +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.0.json b/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.0.json new file mode 100644 index 000000000..aad0d1b65 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.0.json @@ -0,0 +1,1245 @@ +{ + "title": "A JSON Schema for OpenAPI 3.0.", + "id": "http://openapis.org/v3/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "description": "This is the root document object of the OpenAPI document.", + "required": [ + "openapi", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "openapi": { + "type": "string" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "components": { + "$ref": "#/definitions/components" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", + "required": [ + "title", + "version" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "termsOfService": { + "type": "string" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + }, + "version": { + "type": "string" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the exposed API.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "email": { + "type": "string", + "format": "email" + } + } + }, + "license": { + "type": "object", + "description": "License information for the exposed API.", + "required": [ + "name" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + } + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "required": [ + "default" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "components": { + "type": "object", + "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemasOrReferences" + }, + "responses": { + "$ref": "#/definitions/responsesOrReferences" + }, + "parameters": { + "$ref": "#/definitions/parametersOrReferences" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "requestBodies": { + "$ref": "#/definitions/requestBodiesOrReferences" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "securitySchemes": { + "$ref": "#/definitions/securitySchemesOrReferences" + }, + "links": { + "$ref": "#/definitions/linksOrReferences" + }, + "callbacks": { + "$ref": "#/definitions/callbacksOrReferences" + } + } + }, + "paths": { + "type": "object", + "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", + "additionalProperties": false, + "patternProperties": { + "^/": { + "$ref": "#/definitions/pathItem" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "pathItem": { + "type": "object", + "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "trace": { + "$ref": "#/definitions/operation" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/parameterOrReference" + }, + "uniqueItems": true + } + } + }, + "operation": { + "type": "object", + "description": "Describes a single API operation on a path.", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/parameterOrReference" + }, + "uniqueItems": true + }, + "requestBody": { + "$ref": "#/definitions/requestBodyOrReference" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "callbacks": { + "$ref": "#/definitions/callbacksOrReferences" + }, + "deprecated": { + "type": "boolean" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + } + } + }, + "externalDocs": { + "type": "object", + "description": "Allows referencing an external resource for extended documentation.", + "required": [ + "url" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "parameter": { + "type": "object", + "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", + "required": [ + "name", + "in" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "in": { + "type": "string" + }, + "description": { + "type": "string" + }, + "required": { + "type": "boolean" + }, + "deprecated": { + "type": "boolean" + }, + "allowEmptyValue": { + "type": "boolean" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + }, + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + } + } + }, + "requestBody": { + "type": "object", + "description": "Describes a single request body.", + "required": [ + "content" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + }, + "required": { + "type": "boolean" + } + } + }, + "mediaType": { + "type": "object", + "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "encoding": { + "$ref": "#/definitions/encodings" + } + } + }, + "encoding": { + "type": "object", + "description": "A single encoding definition applied to a single schema property.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "contentType": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + } + } + }, + "responses": { + "type": "object", + "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", + "additionalProperties": false, + "patternProperties": { + "^([0-9X]{3})$": { + "$ref": "#/definitions/responseOrReference" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "default": { + "$ref": "#/definitions/responseOrReference" + } + } + }, + "response": { + "type": "object", + "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", + "required": [ + "description" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + }, + "links": { + "$ref": "#/definitions/linksOrReferences" + } + } + }, + "callback": { + "type": "object", + "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", + "additionalProperties": false, + "patternProperties": { + "^": { + "$ref": "#/definitions/pathItem" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "example": { + "type": "object", + "description": "", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "value": { + "$ref": "#/definitions/any" + }, + "externalValue": { + "type": "string" + } + } + }, + "link": { + "type": "object", + "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "operationRef": { + "type": "string" + }, + "operationId": { + "type": "string" + }, + "parameters": { + "$ref": "#/definitions/anysOrExpressions" + }, + "requestBody": { + "$ref": "#/definitions/anyOrExpression" + }, + "description": { + "type": "string" + }, + "server": { + "$ref": "#/definitions/server" + } + } + }, + "header": { + "type": "object", + "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "required": { + "type": "boolean" + }, + "deprecated": { + "type": "boolean" + }, + "allowEmptyValue": { + "type": "boolean" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + }, + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + } + } + }, + "tag": { + "type": "object", + "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", + "required": [ + "name" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + } + }, + "reference": { + "type": "object", + "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + }, + "schema": { + "type": "object", + "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "nullable": { + "type": "boolean" + }, + "discriminator": { + "$ref": "#/definitions/discriminator" + }, + "readOnly": { + "type": "boolean" + }, + "writeOnly": { + "type": "boolean" + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": { + "$ref": "#/definitions/any" + }, + "deprecated": { + "type": "boolean" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/required" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "type": { + "type": "string" + }, + "allOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "anyOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "not": { + "$ref": "#/definitions/schema" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schemaOrReference" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + } + ] + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schemaOrReference" + } + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schemaOrReference" + }, + { + "type": "boolean" + } + ] + }, + "default": { + "$ref": "#/definitions/defaultType" + }, + "description": { + "type": "string" + }, + "format": { + "type": "string" + } + } + }, + "discriminator": { + "type": "object", + "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", + "required": [ + "propertyName" + ], + "additionalProperties": false, + "properties": { + "propertyName": { + "type": "string" + }, + "mapping": { + "$ref": "#/definitions/strings" + } + } + }, + "xml": { + "type": "object", + "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean" + }, + "wrapped": { + "type": "boolean" + } + } + }, + "securityScheme": { + "type": "object", + "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", + "required": [ + "type" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "type": { + "type": "string" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "in": { + "type": "string" + }, + "scheme": { + "type": "string" + }, + "bearerFormat": { + "type": "string" + }, + "flows": { + "$ref": "#/definitions/oauthFlows" + }, + "openIdConnectUrl": { + "type": "string" + } + } + }, + "oauthFlows": { + "type": "object", + "description": "Allows configuration of the supported OAuth Flows.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "implicit": { + "$ref": "#/definitions/oauthFlow" + }, + "password": { + "$ref": "#/definitions/oauthFlow" + }, + "clientCredentials": { + "$ref": "#/definitions/oauthFlow" + }, + "authorizationCode": { + "$ref": "#/definitions/oauthFlow" + } + } + }, + "oauthFlow": { + "type": "object", + "description": "Configuration details for a supported OAuth Flow", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "authorizationUrl": { + "type": "string" + }, + "tokenUrl": { + "type": "string" + }, + "refreshUrl": { + "type": "string" + }, + "scopes": { + "$ref": "#/definitions/strings" + } + } + }, + "securityRequirement": { + "type": "object", + "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", + "additionalProperties": false, + "patternProperties": { + "^[a-zA-Z0-9\\.\\-_]+$": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + } + }, + "anyOrExpression": { + "oneOf": [ + { + "$ref": "#/definitions/any" + }, + { + "$ref": "#/definitions/expression" + } + ] + }, + "callbackOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/callback" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "exampleOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/example" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "headerOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/header" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "linkOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/link" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "parameterOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "requestBodyOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/requestBody" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "responseOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "schemaOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "securitySchemeOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/securityScheme" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "anysOrExpressions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/anyOrExpression" + } + }, + "callbacksOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/callbackOrReference" + } + }, + "encodings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/encoding" + } + }, + "examplesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/exampleOrReference" + } + }, + "headersOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/headerOrReference" + } + }, + "linksOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/linkOrReference" + } + }, + "mediaTypes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/mediaType" + } + }, + "parametersOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameterOrReference" + } + }, + "requestBodiesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/requestBodyOrReference" + } + }, + "responsesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/responseOrReference" + } + }, + "schemasOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schemaOrReference" + } + }, + "securitySchemesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/securitySchemeOrReference" + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "strings": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "object": { + "type": "object", + "additionalProperties": 
true + }, + "any": { + "additionalProperties": true + }, + "expression": { + "type": "object", + "additionalProperties": true + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "oneOf": [ + { + "type": "null" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "type": "object" + }, + { + "type": "array" + } + ] + }, + "defaultType": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "string" + } + ] + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.1.json b/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.1.json new file mode 100644 index 000000000..9af27be66 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/openapiv3/openapi-3.1.json @@ -0,0 +1,1244 @@ +{ + "title": "A JSON Schema for OpenAPI 3.0.", + "id": "http://openapis.org/v3/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "description": "This is the root document object of the OpenAPI document.", + "required": [ + "openapi", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "openapi": { + "type": "string" + }, + "info": { + "$ref": "#/definitions/info" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "components": { + "$ref": "#/definitions/components" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", + "required": [ + "title", + "version" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "termsOfService": { + "type": "string" + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + }, + "version": { + "type": "string" + }, + "summary": { + "type": "string" + }, + } + }, + "contact": { + "type": "object", + "description": "Contact information for the exposed API.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + }, + "email": { + "type": "string", + "format": "email" + } + } + }, + "license": { + "type": "object", + "description": "License information for the exposed API.", + "required": [ + "name" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "server": { + "type": "object", + "description": "An object representing a Server.", + "required": [ + "url" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "variables": { + "$ref": "#/definitions/serverVariables" + } + } + }, + "serverVariable": { + "type": "object", + "description": "An object representing a Server Variable for server URL template substitution.", + "required": [ + "default" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "enum": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "default": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "components": { + "type": "object", + "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schemas": { + "$ref": "#/definitions/schemasOrReferences" + }, + "responses": { + "$ref": "#/definitions/responsesOrReferences" + }, + "parameters": { + "$ref": "#/definitions/parametersOrReferences" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "requestBodies": { + "$ref": "#/definitions/requestBodiesOrReferences" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "securitySchemes": { + "$ref": "#/definitions/securitySchemesOrReferences" + }, + "links": { + "$ref": "#/definitions/linksOrReferences" + }, + "callbacks": { + "$ref": "#/definitions/callbacksOrReferences" + } + } + }, + "paths": { + "type": "object", + "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", + "additionalProperties": false, + "patternProperties": { + "^/": { + "$ref": "#/definitions/pathItem" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "pathItem": { + "type": "object", + "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "trace": { + "$ref": "#/definitions/operation" + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/parameterOrReference" + }, + "uniqueItems": true + } + } + }, + "operation": { + "type": "object", + "description": "Describes a single API operation on a path.", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/parameterOrReference" + }, + "uniqueItems": true + }, + "requestBody": { + "$ref": "#/definitions/requestBodyOrReference" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "callbacks": { + "$ref": "#/definitions/callbacksOrReferences" + }, + "deprecated": { + "type": "boolean" + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "servers": { + "type": "array", + "items": { + "$ref": "#/definitions/server" + }, + "uniqueItems": true + } + } + }, + "externalDocs": { + "type": "object", + "description": "Allows referencing an external resource for extended documentation.", + "required": [ + "url" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "parameter": { + "type": "object", + "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", + "required": [ + "name", + "in" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "in": { + "type": "string" + }, + "description": { + "type": "string" + }, + "required": { + "type": "boolean" + }, + "deprecated": { + "type": "boolean" + }, + "allowEmptyValue": { + "type": "boolean" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + }, + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + } + } + }, + "requestBody": { + "type": "object", + "description": "Describes a single request body.", + "required": [ + "content" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + }, + "required": { + "type": "boolean" + } + } + }, + "mediaType": { + "type": "object", + "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "encoding": { + "$ref": "#/definitions/encodings" + } + } + }, + "encoding": { + "type": "object", + "description": "A single encoding definition applied to a single schema property.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "contentType": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + } + } + }, + "responses": { + "type": "object", + "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", + "additionalProperties": false, + "patternProperties": { + "^([0-9X]{3})$": { + "$ref": "#/definitions/responseOrReference" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "default": { + "$ref": "#/definitions/responseOrReference" + } + } + }, + "response": { + "type": "object", + "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", + "required": [ + "description" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "headers": { + "$ref": "#/definitions/headersOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + }, + "links": { + "$ref": "#/definitions/linksOrReferences" + } + } + }, + "callback": { + "type": "object", + "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", + "additionalProperties": false, + "patternProperties": { + "^": { + "$ref": "#/definitions/pathItem" + }, + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + } + }, + "example": { + "type": "object", + "description": "", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "summary": { + "type": "string" + }, + "description": { + "type": "string" + }, + "value": { + "$ref": "#/definitions/any" + }, + "externalValue": { + "type": "string" + } + } + }, + "link": { + "type": "object", + "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "operationRef": { + "type": "string" + }, + "operationId": { + "type": "string" + }, + "parameters": { + "$ref": "#/definitions/anyOrExpression" + }, + "requestBody": { + "$ref": "#/definitions/anyOrExpression" + }, + "description": { + "type": "string" + }, + "server": { + "$ref": "#/definitions/server" + } + } + }, + "header": { + "type": "object", + "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "description": { + "type": "string" + }, + "required": { + "type": "boolean" + }, + "deprecated": { + "type": "boolean" + }, + "allowEmptyValue": { + "type": "boolean" + }, + "style": { + "type": "string" + }, + "explode": { + "type": "boolean" + }, + "allowReserved": { + "type": "boolean" + }, + "schema": { + "$ref": "#/definitions/schemaOrReference" + }, + "example": { + "$ref": "#/definitions/any" + }, + "examples": { + "$ref": "#/definitions/examplesOrReferences" + }, + "content": { + "$ref": "#/definitions/mediaTypes" + } + } + }, + "tag": { + "type": "object", + "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", + "required": [ + "name" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + } + }, + "reference": { + "type": "object", + "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + }, + "schema": { + "type": "object", + "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "nullable": { + "type": "boolean" + }, + "discriminator": { + "$ref": "#/definitions/discriminator" + }, + "readOnly": { + "type": "boolean" + }, + "writeOnly": { + "type": "boolean" + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": { + "$ref": "#/definitions/any" + }, + "deprecated": { + "type": "boolean" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/required" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "type": { + "type": "string" + }, + "allOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "anyOf": { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + }, + "not": { + "$ref": "#/definitions/schema" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schemaOrReference" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/schemaOrReference" + }, + "minItems": 1 + } + ] + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schemaOrReference" + } + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schemaOrReference" + }, + { + "type": "boolean" + } + ] + }, + "default": { + "$ref": "#/definitions/defaultType" + }, + "description": { + "type": "string" + }, + "format": { + "type": "string" + } + } + }, + "discriminator": { + "type": "object", + "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", + "required": [ + "propertyName" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "propertyName": { + "type": "string" + }, + "mapping": { + "$ref": "#/definitions/strings" + } + } + }, + "xml": { + "type": "object", + "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean" + }, + "wrapped": { + "type": "boolean" + } + } + }, + "securityScheme": { + "type": "object", + "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.", + "required": [ + "type" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "type": { + "type": "string" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "in": { + "type": "string" + }, + "scheme": { + "type": "string" + }, + "bearerFormat": { + "type": "string" + }, + "flows": { + "$ref": "#/definitions/oauthFlows" + }, + "openIdConnectUrl": { + "type": "string" + } + } + }, + "oauthFlows": { + "type": "object", + "description": "Allows configuration of the supported OAuth Flows.", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "implicit": { + "$ref": "#/definitions/oauthFlow" + }, + "password": { + "$ref": "#/definitions/oauthFlow" + }, + "clientCredentials": { + "$ref": "#/definitions/oauthFlow" + }, + "authorizationCode": { + "$ref": "#/definitions/oauthFlow" + } + } + }, + "oauthFlow": { + "type": "object", + "description": "Configuration details for a supported OAuth Flow", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/specificationExtension" + } + }, + "properties": { + "authorizationUrl": { + "type": "string" + }, + "tokenUrl": { + "type": "string" + }, + "refreshUrl": { + "type": "string" + }, + "scopes": { + "$ref": "#/definitions/strings" + } + } + }, + "securityRequirement": { + "type": "object", + "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. 
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "anyOrExpression": { + "oneOf": [ + { + "$ref": "#/definitions/any" + }, + { + "$ref": "#/definitions/expression" + } + ] + }, + "callbackOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/callback" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "exampleOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/example" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "headerOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/header" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "linkOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/link" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "parameterOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "requestBodyOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/requestBody" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "responseOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "schemaOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "securitySchemeOrReference": { + "oneOf": [ + { + "$ref": "#/definitions/securityScheme" + }, + { + "$ref": "#/definitions/reference" + } + ] + }, + "callbacksOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/callbackOrReference" + } + }, + "encodings": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/encoding" + } + }, + "examplesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/exampleOrReference" + } + }, + "headersOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/headerOrReference" + } + }, + "linksOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/linkOrReference" + } + }, + "mediaTypes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/mediaType" + } + }, + "parametersOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameterOrReference" + } + }, + "requestBodiesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/requestBodyOrReference" + } + }, + "responsesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/responseOrReference" + } + }, + "schemasOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schemaOrReference" + } + }, + "securitySchemesOrReferences": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/securitySchemeOrReference" + } + }, + "serverVariables": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/serverVariable" + } + }, + "strings": { + "type": "object", + "additionalProperties": { + "type": 
"string" + } + }, + "object": { + "type": "object", + "additionalProperties": true + }, + "any": { + "additionalProperties": true + }, + "expression": { + "type": "object", + "additionalProperties": true + }, + "specificationExtension": { + "description": "Any property starting with x- is valid.", + "oneOf": [ + { + "type": "null" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "type": "object" + }, + { + "type": "array" + } + ] + }, + "defaultType": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "string" + } + ] + } + } +} diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5f..c589addf9 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,8 +8,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index fbb43744d..b097728db 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.8.x + - 1.9.x - 1.x before_install: diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock index 2a3a69893..10ef81118 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -1,15 +1,9 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + input-imports = [] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 2f4f4dbdc..a9bc5061b 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -26,10 +26,6 @@ ignored = [] -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - [prune] go-tests = true unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go new file mode 100644 index 000000000..2b4116f6c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -0,0 +1,23 @@ +//+build go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + var it hiter + mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it) + return &UnsafeMapIterator{ + hiter: &it, + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go deleted file mode 100644 index 5c1cea868..000000000 --- a/vendor/github.com/modern-go/reflect2/go_above_17.go +++ /dev/null @@ -1,8 +0,0 @@ -//+build go1.7 - -package reflect2 - -import "unsafe" - -//go:linkname resolveTypeOff reflect.resolveTypeOff -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index c7e3b7801..974f7685e 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -6,6 +6,9 @@ import ( "unsafe" ) +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + //go:linkname makemap reflect.makemap func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go new file mode 100644 index 000000000..00003dbd7 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -0,0 +1,21 @@ +//+build !go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go deleted file mode 100644 index 65a93c889..000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_17.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.7 - -package reflect2 - -import "unsafe" - -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { - return nil -} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go deleted file mode 100644 index b050ef70c..000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_19.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !go1.9 - -package reflect2 - -import ( - "unsafe" -) - -//go:linkname makemap reflect.makemap -func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) - -func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { - return makemap(rtype) -} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 63b49c799..c43c8b9d6 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -1,8 +1,9 @@ package reflect2 import ( - "github.com/modern-go/concurrent" "reflect" + "runtime" + "sync" "unsafe" ) @@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze() type frozenConfig struct { useSafeImplementation bool - cache *concurrent.Map + cache *sync.Map } func (cfg Config) Froze() *frozenConfig { return &frozenConfig{ useSafeImplementation: cfg.UseSafeImplementation, - cache: concurrent.NewMap(), + cache: new(sync.Map), } } @@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer { } func UnsafeCastString(str string) []byte { + bytes := make([]byte, 0) stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) - sliceHeader := &reflect.SliceHeader{ - Data: stringHeader.Data, - Cap: stringHeader.Len, - Len: stringHeader.Len, - } - return *(*[]byte)(unsafe.Pointer(sliceHeader)) + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + sliceHeader.Data = stringHeader.Data + sliceHeader.Cap = stringHeader.Len + sliceHeader.Len = stringHeader.Len + runtime.KeepAlive(str) + return bytes } diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100644 index 3d2b9768c..000000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb5580..4b13c3155 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,17 +1,13 @@ +// +build !gccgo + package reflect2 import ( "reflect" - "runtime" - "strings" "sync" "unsafe" ) -// typelinks1 for 1.5 ~ 1.6 -//go:linkname typelinks1 reflect.typelinks -func typelinks1() [][]unsafe.Pointer - // typelinks2 for 1.7 ~ //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) @@ -27,49 +23,10 @@ func discoverTypes() { types = make(map[string]reflect.Type) packages = make(map[string]map[string]reflect.Type) - ver := runtime.Version() - if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { - loadGo15Types() - } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { - loadGo15Types() - } else { - loadGo17Types() - } + loadGoTypes() } -func loadGo15Types() { - var obj interface{} = reflect.TypeOf(0) - typePtrss := typelinks1() - for _, typePtrs := range typePtrss { - for _, typePtr := range typePtrs { - (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr - typ := obj.(reflect.Type) - if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { - loadedType := typ.Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && - typ.Elem().Elem().Kind() == reflect.Struct { - loadedType := typ.Elem().Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - } - } -} - -func loadGo17Types() { +func loadGoTypes() { var obj interface{} = reflect.TypeOf(0) sections, offset := typelinks2() for i, offs := range offset { diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index 57229c8db..b49f614ef 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int //go:linkname mapassign reflect.mapassign //go:noescape -func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer) //go:linkname mapaccess reflect.mapaccess //go:noescape func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) -// m escapes into the return value, but the caller of mapiterinit -// doesn't let the return value escape. 
-//go:noescape -//go:linkname mapiterinit reflect.mapiterinit -func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter - //go:noescape //go:linkname mapiternext reflect.mapiternext func mapiternext(it *hiter) @@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate // the layout of this structure. type hiter struct { - key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). - value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). - // rest fields are ignored + key unsafe.Pointer + value unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr } // add returns p+x. diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go index f2e76e6bb..37872da81 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_map.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { return type2.UnsafeIterate(objEFace.data) } -func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { - return &UnsafeMapIterator{ - hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), - pKeyRType: type2.pKeyRType, - pElemRType: type2.pElemRType, - } -} - type UnsafeMapIterator struct { *hiter pKeyRType unsafe.Pointer diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index e0a3b5004..000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -stages: - - test - - build - -go: - - 1.12.x - - 1.13.x - - tip - -env: GO111MODULE=on - -before_install: - - go get -u github.com/kyoh86/richgo - - go get -u github.com/mitchellh/gox - - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest - -matrix: - allow_failures: - - go: tip - include: - - stage: build - go: 1.13.x - script: make cobra_generator - -script: - - make test diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index a1b13ddda..074e3979f 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -7,7 +7,6 @@ Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. [![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) @@ -19,18 +18,18 @@ name a few. 
[This list](./projects_using_cobra.md) contains a more extensive lis * [Commands](#commands) * [Flags](#flags) - [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating shell completions](#generating-shell-completions) +- [Usage](#usage) + * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator) + * [Using the Cobra Library](user_guide.md#using-the-cobra-library) + * [Working with Flags](user_guide.md#working-with-flags) + * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments) + * [Example](user_guide.md#example) + * [Help Command](user_guide.md#help-command) + * [Usage Message](user_guide.md#usage-message) + * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks) + * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens) + * [Generating documentation for your command](user_guide.md#generating-documentation-for-your-command) + * [Generating shell completions](user_guide.md#generating-shell-completions) - [Contributing](CONTRIBUTING.md) - [License](#license) @@ -117,643 +116,9 @@ Next, include Cobra in your application: import "github.com/spf13/cobra" ``` -# Getting Started +# Usage -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. 
- -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. 
- -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
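The built-in validators listed above plug straight into a command definition via the `Args` field. A minimal hedged sketch (the `deploy` command is hypothetical, not part of this diff):

```go
package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

// deployCmd accepts exactly one positional argument; cobra.ExactArgs(1)
// reports a usage error for zero arguments or for more than one.
var deployCmd = &cobra.Command{
	Use:  "deploy [environment]",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("deploying to", args[0])
	},
}
```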
- -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. 
- - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
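As a hedged sketch of what that documentation generation looks like in code (the command name and output directory are assumptions, not part of this diff), the `doc` sub-package can emit one Markdown page per command:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "myapp"}

	// GenMarkdownTree walks the command tree and writes one Markdown
	// page per command into the given directory.
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```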
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). +See [User Guide](user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 710614793..733f4d121 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -384,7 +384,7 @@ func writePostscript(buf io.StringWriter, name string) { name = strings.Replace(name, ":", "__", -1) WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword + local cur prev words cword split declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then @@ -400,11 +400,13 @@ func writePostscript(buf io.StringWriter, name string) { local flags_with_completion=() local flags_completion=() local commands=("%[1]s") + local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() local has_completion_function local last_command local nouns=() + local noun_aliases=() __%[1]s_handle_word } @@ -510,6 +512,8 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { // Setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() for flag := range flagCompletionFunctions { // Make sure the completion script calls the __*_go_custom_completion function for // every registered flag. We need to do this here (and not when the flag was registered diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index 130f99b92..52919b2fa 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -6,6 +6,8 @@ Please refer to [Shell Completions](shell_completions.md) for details. For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. +**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. + The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. 
Some code that works in kubernetes: diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 000000000..8859b57c4 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,302 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. +__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ "${cur}" == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out[*]}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. 
+ __%[1]s_debug "Received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + + # Use printf to strip any trailing newline + local subdir + subdir=$(printf "%%s" "${out[0]}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + __%[1]s_handle_standard_completion_case + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = +} + +__%[1]s_handle_standard_completion_case() { + local tab comp + tab=$(printf '\t') + + local longest=0 + # Look for the longest completion so that we can format things nicely + while IFS='' read -r comp; do + # Strip any description before checking the length + comp=${comp%%%%$tab*} + # Only consider the completions that match + comp=$(compgen -W "$comp" -- "$cur") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${out[@]}") + + local completions=() + while IFS='' read -r comp; do + if [ -z "$comp" ]; then + continue + fi + + __%[1]s_debug "Original comp: $comp" + comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")" + __%[1]s_debug "Final comp: $comp" + completions+=("$comp") + done < <(printf "%%s\n" "${out[@]}") + + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") + + # If there is a single completion left, remove the description text + if [ ${#COMPREPLY[*]} -eq 1 ]; then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%% *}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY=() + COMPREPLY+=("$comp") + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while [[ $((--idx)) -ge 0 ]]; do + COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab + tab=$(printf '\t') + local comp="$1" + local longest=$2 + + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. 
+ maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. + if [[ $maxdesclength -gt 8 ]]; then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if [ $maxdesclength -gt 0 ]; then + if [ ${#desc} -gt $maxdesclength ]; then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + fi + + # Must use printf to escape all special characters + printf "%%q" "${comp}" +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n "=:" || return + else + __%[1]s_init_completion -n "=:" || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index d6732ad11..2cc18891d 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -63,9 +63,9 @@ type Command struct { // Example is examples of how to use the command. Example string - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion. + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. 
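The new V2 bash generator added above pairs naturally with the shell-agnostic `ValidArgsFunction` that the `command.go` hunk documents next. A hedged sketch under assumed names (the `myapp`/`get` commands and the completion values are illustrative only):

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "myapp"}

	getCmd := &cobra.Command{
		Use: "get [resource]",
		// ValidArgsFunction supplies completions dynamically and, unlike the
		// legacy BashCompletionFunction, works for every supported shell.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			if len(args) != 0 {
				return nil, cobra.ShellCompDirectiveNoFileComp
			}
			return []string{"pods", "nodes", "services"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.AddCommand(getCmd)

	// Write the V2 bash completion script, keeping descriptions.
	if err := rootCmd.GenBashCompletionFileV2("myapp-completion.bash", true); err != nil {
		log.Fatal(err)
	}
}
```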
ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) @@ -74,11 +74,12 @@ type Command struct { Args PositionalArgs // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, + // These are not suggested to the user in the shell completion, // but accepted if entered manually. ArgAliases []string - // BashCompletionFunction is custom functions used by the bash autocompletion generator. + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction BashCompletionFunction string // Deprecated defines, if this command is deprecated and should print this string when used. @@ -168,6 +169,9 @@ type Command struct { //FParseErrWhitelist flag parse errors to be ignored FParseErrWhitelist FParseErrWhitelist + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + // commandsAreSorted defines, if command slice are sorted or not. commandsAreSorted bool // commandCalledAs is the name or alias value used to call this command. @@ -884,7 +888,8 @@ func (c *Command) preRun() { } // ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. func (c *Command) ExecuteContext(ctx context.Context) error { c.ctx = ctx return c.Execute() @@ -898,6 +903,14 @@ func (c *Command) Execute() error { return err } +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + // ExecuteC executes the command. func (c *Command) ExecuteC() (cmd *Command, err error) { if c.ctx == nil { @@ -914,9 +927,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { preExecHookFn(c) } - // initialize help as the last point possible to allow for user - // overriding + // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.initDefaultCompletionCmd() args := c.args @@ -925,7 +939,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for bash completion + // initialize the hidden command to be used for shell completion c.initCompleteCmd(args) var flags []string diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/completions.go similarity index 70% rename from vendor/github.com/spf13/cobra/custom_completions.go rename to vendor/github.com/spf13/cobra/completions.go index fa060c147..b849b9c84 100644 --- a/vendor/github.com/spf13/cobra/custom_completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "strings" + "sync" "github.com/spf13/pflag" ) @@ -17,13 +18,25 @@ const ( ShellCompNoDescRequestCmd = "__completeNoDesc" ) -// Global map of flag completion functions. +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. 
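The `ExecuteContextC` method introduced in the hunk above combines `ExecuteC`'s return of the executed command with context propagation. A hedged usage sketch (the root command and its behavior are assumptions for illustration):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use: "myapp",
		RunE: func(cmd *cobra.Command, args []string) error {
			// cmd.Context() returns the ctx passed to ExecuteContextC below.
			deadline, ok := cmd.Context().Deadline()
			fmt.Println("has deadline:", ok, deadline)
			return nil
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// ExecuteContextC behaves like ExecuteC but also sets ctx on the command:
	// it returns the command that actually ran together with its error, which
	// is handy for error reporting.
	cmd, err := rootCmd.ExecuteContextC(ctx)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s: %v\n", cmd.Name(), err)
		os.Exit(1)
	}
}
```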
var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + // ShellCompDirective is a bit map representing the different behaviors the shell // can be instructed to have once completions have been provided. type ShellCompDirective int +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + const ( // ShellCompDirectiveError indicates an error occurred and completions should be ignored. ShellCompDirectiveError ShellCompDirective = 1 << iota @@ -34,7 +47,6 @@ const ( // ShellCompDirectiveNoFileComp indicates that the shell should not provide // file completion even when no completion is provided. - // This currently does not work for zsh or bash < 4 ShellCompDirectiveNoFileComp // ShellCompDirectiveFilterFileExt indicates that the provided completions @@ -63,12 +75,41 @@ const ( ShellCompDirectiveDefault ShellCompDirective = 0 ) +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { flag := c.Flag(flagName) if flag == nil { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + if _, exists := flagCompletionFunctions[flag]; exists { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) } @@ -149,10 +190,6 @@ func (c *Command) initCompleteCmd(args []string) { fmt.Fprintln(finalCmd.OutOrStdout(), comp) } - if directive >= shellCompDirectiveMaxValue { - directive = ShellCompDirectiveDefault - } - // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects : @@ -195,23 +232,41 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Unable to find the real command. 
E.g., someInvalidCmd return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) } + finalCmd.ctx = c.ctx // Check if we are doing flag value completion before parsing the flags. // This is important because if we are completing a flag value, we need to also // remove the flag name argument from the list of finalArgs or else the parsing // could fail due to an invalid value (incomplete) for the flag. - flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - if err != nil { - // Error while attempting to parse flags - return finalCmd, []string{}, ShellCompDirectiveDefault, err - } + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. + flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } - if flag != nil { + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + if flag != nil && flagCompletion { // Check if we are completing a flag value subject to annotations if validExts, present := flag.Annotations[BashCompFilenameExt]; present { if len(validExts) != 0 { @@ -238,7 +293,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // When doing completion of a flag name, as soon as an argument starts with // a '-' we know it is a flag. 
We cannot use isFlagArg() here as it requires // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") { + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { var completions []string // First check for required flags @@ -302,7 +357,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { // We only complete sub-commands if: // - there are no arguments on the command-line and - // - there are no local, non-peristent flag on the command-line or TraverseChildren is true + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { @@ -351,8 +406,10 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Find the completion function for the flag or command var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil { + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() } else { completionFn = finalCmd.ValidArgsFunction } @@ -435,6 +492,7 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p var flagName string trimmedArgs := args flagWithEqual := false + orgLastArg := lastArg // When doing completion of a flag name, as soon as an argument starts with // a '-' we know it is a flag. We cannot use isFlagArg() here as that function @@ -442,7 +500,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p if len(lastArg) > 0 && lastArg[0] == '-' { if index := strings.Index(lastArg, "="); index >= 0 { // Flag with an = - flagName = strings.TrimLeft(lastArg[:index], "-") + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } lastArg = lastArg[index+1:] flagWithEqual = true } else { @@ -459,8 +526,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // If the flag contains an = it means it has already been fully processed, // so we don't need to deal with it here. if index := strings.Index(prevArg, "="); index < 0 { - flagName = strings.TrimLeft(prevArg, "-") - + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } // Remove the uncompleted flag or else there could be an error created // for an invalid value for that flag trimmedArgs = args[:len(args)-1] @@ -476,9 +551,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p flag := findFlag(finalCmd, flagName) if flag == nil { - // Flag not supported by this command, nothing to complete - err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName) - return nil, nil, "", err + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} } if !flagWithEqual { @@ -494,6 +568,156 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p return flag, trimmedArgs, lastArg, nil } +// initDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) initDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(` +Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: +$ source <(%[1]s completion bash) + +To load completions for every new session, execute once: +Linux: + $ %[1]s completion bash > /etc/bash_completion.d/%[1]s +MacOS: + $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. + `, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. 
You can execute the following once: + +$ echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions for every new session, execute once: +# Linux: +$ %[1]s completion zsh > "${fpath[1]}/_%[1]s" +# macOS: +$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: +$ %[1]s completion fish | source + +To load completions for every new session, execute once: +$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(` +Generate the autocompletion script for powershell. + +To load completions in your current shell session: +PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + func findFlag(cmd *Command, name string) *pflag.Flag { flagSet := cmd.Flags() if len(name) == 1 { diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 3e112347d..bb57fd568 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -21,44 +21,47 @@ func genFishComp(buf io.StringWriter, name string, includeDesc bool) { WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(` function __%[1]s_debug - set file "$BASH_COMP_DEBUG_FILE" + set -l file "$BASH_COMP_DEBUG_FILE" if test -n "$file" echo "$argv" >> $file end end function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv" + __%[1]s_debug "Starting __%[1]s_perform_completion" - set args (string split -- " " "$argv") - set lastArg "$args[-1]" + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) __%[1]s_debug "args: $args" __%[1]s_debug "last arg: $lastArg" - set emptyArg "" - if test -z "$lastArg" - __%[1]s_debug "Setting emptyArg" - set emptyArg \"\" - end - __%[1]s_debug "emptyArg: $emptyArg" + set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg" - if not type -q "$args[1]" - # This can happen when "complete --do-complete %[2]s" is called when running this script. - __%[1]s_debug "Cannot find $args[1]. No completions." - return - end - - set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg" __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) - set results (eval $requestComp 2> /dev/null) - set comps $results[1..-2] - set directiveLine $results[-1] + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. 
+ # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] # For Fish, when completing a flag with an = (e.g., -n=) # completions must be prefixed with the flag - set flagPrefix (string match -r -- '-.*=' "$lastArg") + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") __%[1]s_debug "Comps: $comps" __%[1]s_debug "DirectiveLine: $directiveLine" @@ -71,115 +74,124 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end -# This function does three things: -# 1- Obtain the completions and store them in the global __%[1]s_comp_results -# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed -# and unset it otherwise -# 3- Return true if the completion results are not empty +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + # Start fresh - set --erase __%[1]s_comp_do_file_comp set --erase __%[1]s_comp_results - # Check if the command-line is already provided. This is useful for testing. - if not set --query __%[1]s_comp_commandLine - # Use the -c flag to allow for completion in the middle of the line - set __%[1]s_comp_commandLine (commandline -c) - end - __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine" - - set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine") - set --erase __%[1]s_comp_commandLine + set -l results (__%[1]s_perform_completion) __%[1]s_debug "Completion results: $results" if test -z "$results" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set directive (string sub --start 2 $results[-1]) + set -l directive (string sub --start 2 $results[-1]) set --global __%[1]s_comp_results $results[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" - set shellCompDirectiveError %[4]d - set shellCompDirectiveNoSpace %[5]d - set shellCompDirectiveNoFileComp %[6]d - set shellCompDirectiveFilterFileExt %[7]d - set shellCompDirectiveFilterDirs %[8]d + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d if test -z "$directive" set directive 0 end - set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) if test $compErr -eq 1 __%[1]s_debug "Received error directive: aborting." 
# Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) if test $filefilter -eq 1; or test $dirfilter -eq 1 __%[1]s_debug "File extension filtering or directory filtering not supported" # Do full file completion instead - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - # Important not to quote the variable for count to work - set numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. + if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" - if test $numComps -eq 1; and test $nospace -ne 0 - # To support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --append __%[1]s_comp_results $__%[1]s_comp_results[1]. + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. 
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end end - if test $numComps -eq 0; and test $nofiles -eq 0 - __%[1]s_debug "Requesting file completion" - set --global __%[1]s_comp_do_file_comp 1 - end - - # If we don't want file completion, we must return true even if there - # are no completions found. This is because fish will perform the last - # completion command, even if its condition is false, if no other - # completion command was triggered - return (not set --query __%[1]s_comp_do_file_comp) + return 0 end # Since Fish completions are only loaded once the user triggers them, we trigger them ourselves # so we can properly delete any completions provided by another script. -# The space after the the program name is essential to trigger completion for the program -# and not completion of the program name itself. -complete --do-complete "%[2]s " > /dev/null 2>&1 -# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end # Remove any pre-existing completions for the program since we will be handling all of them. complete -c %[2]s -e -# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions -# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable. -# -# This completion will be run second as complete commands are added FILO. -# It triggers file completion choices when __%[1]s_comp_do_file_comp is set. -complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp' - -# This completion will be run first as complete commands are added FILO. -# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp. -# It provides the program's completion choices. +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index c55be71cd..59234c09f 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -86,7 +86,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { # We add an extra empty parameter so we can indicate this to the go method. 
__%[1]s_debug "Adding extra empty parameter" `+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` } __%[1]s_debug "Calling $RequestComp" @@ -140,6 +140,25 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { $Space = "" } + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -153,32 +172,13 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { } } - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or - (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { - __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" - - # return here to prevent the completion of the extensions - return - } - - $Values = $Values | Where-Object { - # filter the result - $_.Name -like "$WordToComplete*" - - # Join the flag back if we have a equal sign flag - if ( $IsEqualFlag ) { - __%[1]s_debug "Join the equal sign flag back to the completion value" - $_.Name = $Flag + "=" + $_.Name - } - } - # Get the current mode $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function __%[1]s_debug "Mode: $Mode" $Values | ForEach-Object { - # store temporay because switch will overwrite $_ + # store temporary because switch will overwrite $_ $comp = $_ # PowerShell supports three different completion modes diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index cd533ac3d..4ba06a11c 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -7,6 +7,15 @@ The currently supported shells are: - fish - PowerShell +Cobra will automatically provide your program with a fully functional `completion` command, +similarly to how it provides the `help` command. + +## Creating your own completion command + +If you do not wish to use the default `completion` command, you can choose to +provide your own, which will take precedence over the default one. (This also provides +backwards-compatibility with programs that already have their own `completion` command.) + If you are using the generator, you can create a completion command by running ```bash @@ -70,7 +79,7 @@ PowerShell: case "fish": cmd.Root().GenFishCompletion(os.Stdout, true) case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) } }, } @@ -78,6 +87,26 @@ PowerShell: **Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. +## Adapting the default completion command + +Cobra provides a few options for the default `completion` command. 
To configure such options you must set +the `CompletionOptions` field on the *root* command. + +To tell Cobra *not* to provide the default `completion` command: +``` +rootCmd.CompletionOptions.DisableDefaultCmd = true +``` + +To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: +``` +rootCmd.CompletionOptions.DisableNoDescFlag = true +``` + +To tell Cobra to completely disable descriptions for completions: +``` +rootCmd.CompletionOptions.DisableDescriptions = true +``` + # Customizing completions The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. @@ -323,7 +352,10 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, ``` ### Descriptions for completions -`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: +Cobra provides support for completion descriptions. Such descriptions are supported for each shell +(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). +For commands and flags, Cobra will provide the descriptions automatically, based on usage information. +For example, using zsh: ``` $ helm s[tab] search -- search for a keyword in charts @@ -336,7 +368,7 @@ $ helm s[tab] search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) ``` -Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: +Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: ```go ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp @@ -371,6 +403,37 @@ completion firstcommand secondcommand For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. Please refer to [Bash Completions](bash_completions.md) for details. +### Bash completion V2 + +Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling +`GenBashCompletion()` or `GenBashCompletionFile()`. + +A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or +`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion +(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion +solution described in this document. 
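+
+As a rough sketch of generating the V2 script from your own `completion` command (assuming a root command named `rootCmd` and the usual `cobra` and `os` imports; the command wiring here is illustrative, not the library's prescribed layout):
+
+```go
+var completionCmd = &cobra.Command{
+	Use:   "completion",
+	Short: "Generate a bash completion script (V2)",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		// The second argument controls whether completions carry descriptions.
+		return cmd.Root().GenBashCompletionV2(os.Stdout, true)
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(completionCmd)
+}
+```
+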
+Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash +completion V2 solution which provides the following extra features: +- Supports completion descriptions (like the other shells) +- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) +- Streamlined user experience thanks to a completion behavior aligned with the other shells + +`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` +you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra +will provide the description automatically based on usage information. You can choose to make this option configurable by +your users. + +``` +# With descriptions +$ helm s[tab][tab] +search (search for a keyword in charts) status (display the status of the named release) +show (show information of a chart) + +# Without descriptions +$ helm s[tab][tab] +search show status +``` +**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. ## Zsh completions Cobra supports native zsh completion generated from the root `cobra.Command`. diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md new file mode 100644 index 000000000..311abce28 --- /dev/null +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -0,0 +1,637 @@ +# User Guide + +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. + +## Using the Cobra Library + +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. +You will optionally provide additional commands as you see fit. + +### Create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. 
+ cfgFile string + userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. +func Execute() error { + return rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigType("yaml") + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Returning and handling errors + +If you wish to return an error to the caller of a command, `RunE` can be used. + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(tryCmd) +} + +var tryCmd = &cobra.Command{ + Use: "try", + Short: "Try and possibly fail at something", + RunE: func(cmd *cobra.Command, args []string) error { + if err := someFunc(); err != nil { + return err + } + return nil + }, +} +``` + +The error can then be caught at the execute function call. + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. 
+ +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. + +```go +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, +when the `--author` flag is not provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +Or, for persistent flags: +```go +rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkPersistentFlagRequired("region") +``` + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field +of `Command`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. +- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
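+
+As a brief sketch of using one of the built-in validators listed above (assuming the usual `cobra` and `fmt` imports; the command name and colors are placeholders), the validator is assigned through the `Args` field, here pairing `ExactValidArgs(1)` with `ValidArgs`:
+
+```go
+var colorCmd = &cobra.Command{
+	Use:       "color [name]",
+	Short:     "Set the output color",
+	ValidArgs: []string{"red", "green", "blue"},
+	// Requires exactly one positional argument, and it must appear in ValidArgs.
+	Args: cobra.ExactValidArgs(1),
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Println("color set to " + args[0])
+	},
+}
+```
+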
+ +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires a color argument") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Echo: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra help + + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. 
+ + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). + +## Generating shell completions + +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 2e840285f..1afec30ea 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -95,7 +95,7 @@ _%[1]s() local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d - local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -163,7 +163,6 @@ _%[1]s() return fi - compCount=0 while IFS='\n' read -r comp; do if [ -n "$comp" ]; then # If requested, completions are returned with a description. @@ -175,13 +174,17 @@ _%[1]s() local tab=$(printf '\t') comp=${comp//$tab/:} - ((compCount++)) __%[1]s_debug "Adding completion: ${comp}" completions+=${comp} lastComp=$comp fi done < <(printf "%%s\n" "${out[@]}") + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." + noSpace="-S ''" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -208,25 +211,40 @@ _%[1]s() __%[1]s_debug "Listing directories in ." fi + local result _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? if [ -n "$subdir" ]; then popd >/dev/null 2>&1 fi - elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then - __%[1]s_debug "Activating nospace." - # We can use compadd here as there is no description when - # there is only one completion. - compadd -S '' "${lastComp}" - elif [ ${compCount} -eq 0 ]; then - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - __%[1]s_debug "deactivating file completion" - else - # Perform file completion - __%[1]s_debug "activating file completion" - _arguments '*:filename:_files'" ${flagPrefix}" - fi + return $result else - _describe "completions" completions $(echo $flagPrefix) + __%[1]s_debug "Calling _describe" + if eval _describe "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. 
+ _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi fi } diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 3b99bf0ac..fdfef8808 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,49 @@ # Changelog +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + ## 1.17.0 (25 May 2021) Bugfixes: diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 3f4b86e08..9e929cd98 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + // TrimNewline trims any final "\n" byte from the end of the buffer. func (b *Buffer) TrimNewline() { if i := len(b.bs) - 1; i >= 0 { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 553f258e7..f116bd936 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -26,7 +26,6 @@ import ( "os" "runtime" "strings" - "time" "go.uber.org/zap/zapcore" ) @@ -51,6 +50,8 @@ type Logger struct { addStack zapcore.LevelEnabler callerSkip int + + clock zapcore.Clock } // New constructs a new Logger from the provided zapcore.Core and Options. 
If @@ -71,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger { core: core, errorOutput: zapcore.Lock(os.Stderr), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } return log.WithOptions(options...) } @@ -85,6 +87,7 @@ func NewNop() *Logger { core: zapcore.NewNopCore(), errorOutput: zapcore.AddSync(ioutil.Discard), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } } @@ -270,7 +273,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // log message will actually be written somewhere. ent := zapcore.Entry{ LoggerName: log.name, - Time: time.Now(), + Time: log.clock.Now(), Level: lvl, Message: msg, } @@ -307,7 +310,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if log.addCaller { frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) if !defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 0135c2092..e9e66161f 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option { log.onFatal = action }) } + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 4084dada7..0b9651981 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -266,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // Make sure this element isn't a dangling key. if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) break } @@ -287,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // If we encountered any invalid key-value pairs, log an error. if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) } return fields } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 000000000..ef2f7d963 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,188 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. + Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. 
+func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 000000000..d2ea95b39 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" +) + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. 
+type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 4aa8b4f90..0885505b7 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) ce.ErrorOutput.Sync() } return @@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) { for i := range ce.cores { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() } should, msg := ce.should, ce.Message diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index f2a07d786..74919b0cc 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -83,7 +83,7 @@ type errorGroup interface { Errors() []error } -// Note that errArry and errArrayElem are very similar to the version +// Note that errArray and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 25f10ca1d..31ed96e12 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -197,12 +197,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { return ce } - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) } - s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index d3596ee66..83c776de0 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -117,6 +117,19 @@ func (b *Builder) AddASN1GeneralizedTime(t time.Time) { }) } +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. 
+ if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + // AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not // support BIT STRINGs that are not a whole number of bytes. func (b *Builder) AddASN1BitString(data []byte) { @@ -466,6 +479,45 @@ func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { return true } +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. + // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + // ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. // It reports whether the read was successful. func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s index 2cb037314..1d74f0f88 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s index 5cd7494b2..58422aad2 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" @@ -82,7 +83,7 @@ multiply: BGE loop bytes_between_0_and_15: - CMP $0, R5 + CMP R5, $0 BEQ done MOVD $0, R16 // h0 MOVD $0, R17 // h1 @@ -122,7 +123,7 @@ just1: // Exactly 8 MOVD (R4), R16 - CMP $0, R17 + CMP R17, $0 // Check if we've already set R17; if not // set 1 to indicate end of msg. 
@@ -151,7 +152,7 @@ less4: ADD $2, R4 less2: - CMP $0, R5 + CMP R5, $0 BEQ insert1 MOVBZ (R4), R21 SLD R22, R21, R21 @@ -166,12 +167,12 @@ insert1: carry: // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDE $0, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply done: // Save h0, h1, h2 in state diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index bdd882c60..69c64f842 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s index f97efc676..c08927720 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && !purego && gc // +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa37..000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ascii.go b/vendor/golang.org/x/net/http2/ascii.go index 0c58d727c..17caa2058 100644 --- a/vendor/golang.org/x/net/http2/ascii.go +++ b/vendor/golang.org/x/net/http2/ascii.go @@ -6,6 +6,10 @@ package http2 import "strings" +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + // asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t // are equal, ASCII-case-insensitively. 
func asciiEqualFold(s, t string) bool { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 09bc70533..19e449cfa 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -259,16 +259,12 @@ func ConfigureServer(s *http.Server, conf *Server) error { s.TLSConfig.PreferServerCipherSuites = true - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { + if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } + if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1") + } if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} @@ -820,7 +816,7 @@ func (sc *serverConn) serve() { }) sc.unackedSettings++ - // Each connection starts with intialWindowSize inflow tokens. + // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) @@ -860,6 +856,15 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + // Process any written frames before reading new frames from the client since a + // written frame could have triggered a new stream to be started. + if sc.writingFrameAsync { + select { + case wroteRes := <-sc.wroteFrameCh: + sc.wroteFrame(wroteRes) + default: + } + } if !sc.processFrameFromReader(res) { return } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 7bd4b9c19..b261beb1d 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -264,9 +264,8 @@ type ClientConn struct { peerMaxHeaderListSize uint64 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred @@ -386,8 +385,13 @@ func (cs *clientStream) abortRequestBodyWrite(err error) { } cc := cs.cc cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() + if cs.stopReqBody == nil { + cs.stopReqBody = err + if cs.req.Body != nil { + cs.req.Body.Close() + } + cc.cond.Broadcast() + } cc.mu.Unlock() } @@ -913,46 +917,6 @@ func (cc *ClientConn) closeForLostPing() error { return cc.closeForError(err) } -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. 
- if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") @@ -1151,40 +1115,28 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf return res, false, nil } + handleError := func(err error) (*http.Response, bool, error) { + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), err + } + for { select { case re := <-readLoopResCh: return handleReadLoopResponse(re) case <-respHeaderTimer: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errTimeout + return handleError(errTimeout) case <-ctx.Done(): - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), ctx.Err() + return handleError(ctx.Err()) case <-req.Cancel: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errRequestCanceled + return handleError(errRequestCanceled) case <-cs.peerReset: // processResetStream already removed the // stream from the streams map; no need for @@ -1295,11 +1247,35 @@ var ( errReqBodyTooLong = errors.New("http2: request body larger than specified content length") ) +// frameScratchBufferLen returns the length of a buffer to use for +// outgoing request bodies to read/write to/from. +// +// It returns max(1, min(peer's advertised max frame size, +// Request.ContentLength+1, 512KB)). +func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { + const max = 512 << 10 + n := int64(maxFrameSize) + if n > max { + n = max + } + if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n { + // Add an extra byte past the declared content-length to + // give the caller's Request.Body io.Reader a chance to + // give us more bytes than they declared, so we can catch it + // early. 
+ n = cl + 1 + } + if n < 1 { + return 1 + } + return int(n) // doesn't truncate; max is 512K +} + +var bufPool sync.Pool // of *[]byte + func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) defer func() { traceWroteRequest(cs.trace, err) @@ -1307,7 +1283,13 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // Request.Body is closed by the Transport, // and in multiple cases: server replies <=299 and >299 // while still writing request body - cerr := bodyCloser.Close() + var cerr error + cc.mu.Lock() + if cs.stopReqBody == nil { + cs.stopReqBody = errStopReqBodyWrite + cerr = bodyCloser.Close() + } + cc.mu.Unlock() if err == nil { err = cerr } @@ -1318,9 +1300,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( remainLen := actualContentLength(req) hasContentLen := remainLen != -1 + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + // Scratch buffer for reading into & writing from. + scratchLen := cs.frameScratchBufferLen(maxFrameSize) + var buf []byte + if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPool.Put(bp) + buf = *bp + } else { + buf = make([]byte, scratchLen) + defer bufPool.Put(&buf) + } + var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)-1]) + n, err := body.Read(buf[:len(buf)]) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1331,8 +1328,9 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // to send the END_STREAM bit early, double-check that we're actually // at EOF. Subsequent reads should return (0, EOF) at this point. // If either value is different, we return an error in one of two ways below. + var scratch [1]byte var n1 int - n1, err = body.Read(buf[n:]) + n1, err = body.Read(scratch[:]) remainLen -= int64(n1) } if remainLen < 0 { @@ -1402,10 +1400,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } - cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) - cc.mu.Unlock() - cc.wmu.Lock() defer cc.wmu.Unlock() diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 8cfd6063e..1473e1296 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,7 +1,7 @@ # OAuth2 for Go +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -14,17 +14,17 @@ go get golang.org/x/oauth2 Or you can manually git clone the repository to `$(go env GOPATH)/src/golang.org/x/oauth2`. -See godoc for further documentation and examples. +See pkg.go.dev for further documentation and examples. 
-* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) ## Policy for new packages We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a single endpoint, add it to the -[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) package. ## Report Issues / Send Patches diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go new file mode 100644 index 000000000..69967cf87 --- /dev/null +++ b/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package authhandler implements a TokenSource to support +// "three-legged OAuth 2.0" via a custom AuthorizationHandler. +package authhandler + +import ( + "context" + "errors" + + "golang.org/x/oauth2" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts +// the user for OAuth consent at the specified auth code URL +// and returns an auth code and state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// TokenSource returns an oauth2.TokenSource that fetches access tokens +// using 3-legged-OAuth flow. +// +// The provided context.Context is used for oauth2 Exchange operation. +// +// The provided oauth2.Config should be a full configuration containing AuthURL, +// TokenURL, and Scope. +// +// An environment-specific AuthorizationHandler is used to obtain user consent. +// +// Per the OAuth protocol, a unique "state" string should be specified here. +// This token source will verify that the "state" is identical in the request +// and response before exchanging the auth code for OAuth token to prevent CSRF +// attacks. +func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state}) +} + +type authHandlerSource struct { + ctx context.Context + config *oauth2.Config + authHandler AuthorizationHandler + state string +} + +func (source authHandlerSource) Token() (*oauth2.Token, error) { + url := source.config.AuthCodeURL(source.state) + code, state, err := source.authHandler(url) + if err != nil { + return nil, err + } + if state != source.state { + return nil, errors.New("state mismatch in 3-legged-OAuth flow") + } + return source.config.Exchange(source.ctx, code) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 83dacac32..16c6c6b90 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). 
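For context on the new golang.org/x/oauth2/authhandler package vendored above, here is a minimal sketch of how its three-legged TokenSource might be driven. This is illustrative only, not part of the vendored patch: the endpoint URLs, client ID/secret, scope, and the prompt-and-paste handler are hypothetical placeholders, and a real handler would usually open a browser and capture the redirect instead of reading from stdin.

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/authhandler"
)

func main() {
	// Hypothetical OAuth2 configuration; fill in real values for your provider.
	conf := &oauth2.Config{
		ClientID:     "client-id",               // hypothetical
		ClientSecret: "client-secret",           // hypothetical
		Scopes:       []string{"example.scope"}, // hypothetical
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/o/auth",  // hypothetical
			TokenURL: "https://example.com/o/token", // hypothetical
		},
	}

	// Per the package doc, a unique state string is supplied and must be
	// echoed back by the handler; TokenSource verifies it to guard against CSRF.
	state := "some-unique-state"

	// The handler prompts the user to visit authCodeURL and paste the
	// resulting authorization code back in (a simplified stand-in for a
	// browser-based consent flow).
	handler := func(authCodeURL string) (code string, st string, err error) {
		fmt.Println("Visit this URL and paste the code:", authCodeURL)
		if _, err := fmt.Scan(&code); err != nil {
			return "", "", err
		}
		return code, state, nil
	}

	ts := authhandler.TokenSource(context.Background(), conf, state, handler)
	tok, err := ts.Token() // exchanges the code for a token on first use
	if err != nil {
		fmt.Println("token exchange failed:", err)
		return
	}
	fmt.Println("access token obtained, expires:", tok.Expiry)
}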
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index 04c2c2216..a7e27b3d2 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index ad2c09236..880dd7b59 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -16,11 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" ) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). type Credentials struct { ProjectID string // may be empty TokenSource oauth2.TokenSource @@ -37,6 +42,32 @@ type Credentials struct { // Deprecated: use Credentials instead. type DefaultCredentials = Credentials +// CredentialsParams holds user supplied parameters that are used together +// with a credentials file for building a Credentials object. +type CredentialsParams struct { + // Scopes is the list OAuth scopes. Required. + // Example: https://www.googleapis.com/auth/cloud-platform + Scopes []string + + // Subject is the user email used for domain wide delegation (see + // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Optional. + AuthHandler authhandler.AuthorizationHandler + + // State is a unique string used with AuthHandler. Optional. + State string +} + +func (params CredentialsParams) deepCopy() CredentialsParams { + paramsCopy := params + paramsCopy.Scopes = make([]string, len(params.Scopes)) + copy(paramsCopy.Scopes, params.Scopes) + return paramsCopy +} + // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { @@ -58,13 +89,17 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc return creds.TokenSource, nil } -// FindDefaultCredentials searches for "Application Default Credentials". +// FindDefaultCredentialsWithParams searches for "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: // // 1. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. // 2. A JSON file in a location known to the gcloud command-line tool. 
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. @@ -73,11 +108,14 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 4. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { +func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scopes) + creds, err := readCredentialsFile(ctx, filename, params) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } @@ -86,7 +124,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + if creds, err := readCredentialsFile(ctx, filename, params); err == nil { return creds, nil } else if !os.IsNotExist(err) { return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) @@ -98,7 +136,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials if appengineTokenFunc != nil { return &DefaultCredentials{ ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scopes...), + TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil } @@ -108,7 +146,7 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials id, _ := metadata.ProjectID() return &DefaultCredentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", scopes...), + TokenSource: ComputeTokenSource("", params.Scopes...), }, nil } @@ -117,16 +155,38 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) } -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { +// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return FindDefaultCredentialsWithParams(ctx, params) +} + +// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), +// a Google Developers service account key file, a gcloud user credentials file (a.k.a. 
refresh +// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud +// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. + config, _ := ConfigFromJSON(jsonData, params.Scopes...) + if config != nil { + return &Credentials{ + ProjectID: "", + TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + JSON: jsonData, + }, nil + } + + // Otherwise, parse jsonData as one of the other supported credentials files. var f credentialsFile if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } @@ -137,6 +197,13 @@ func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) }, nil } +// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return CredentialsFromJSONWithParams(ctx, jsonData, params) +} + func wellKnownFile() string { const f = "application_default_credentials.json" if runtime.GOOS == "windows" { @@ -145,10 +212,10 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err } - return CredentialsFromJSON(ctx, b, scopes...) + return CredentialsFromJSONWithParams(ctx, b, params) } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 73be62903..8e6a57ce9 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -4,13 +4,16 @@ // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. // // A brief overview of the package follows. For more information, please read // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // // OAuth2 Configs // @@ -19,6 +22,35 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. 
// +// Workload Identity Federation +// +// Using workload identity federation, your application can access Google Cloud +// resources from Amazon Web Services (AWS), Microsoft Azure or any identity +// provider that supports OpenID Connect (OIDC). +// Traditionally, applications running outside Google Cloud have used service +// account keys to access Google Cloud resources. Using identity federation, +// you can allow your workload to impersonate a service account. +// This lets you access Google Cloud resources directly, eliminating the +// maintenance and security burden associated with service account keys. +// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws +// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure +// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// +// For OIDC providers, the library can retrieve OIDC tokens either from a +// local file location (file-sourced credentials) or from a local server +// (URL-sourced credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// // // Credentials // @@ -29,6 +61,13 @@ // FindDefaultCredentials looks in some well-known places for a credentials file, and // will call AppEngineTokenSource or ComputeTokenSource as needed. // +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// // DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, // then use the credentials to construct an http.Client or an oauth2.TokenSource. // diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 81de32b36..422ff1fe3 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -15,10 +15,11 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/externalaccount" "golang.org/x/oauth2/jwt" ) -// Endpoint is Google's OAuth 2.0 endpoint. +// Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://accounts.google.com/o/oauth2/auth", TokenURL: "https://oauth2.googleapis.com/token", @@ -86,23 +87,25 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) } scope = append([]string(nil), scope...) 
// copy - return f.jwtConfig(scope), nil + return f.jwtConfig(scope, ""), nil } // JSON key file types. const ( serviceAccountKey = "service_account" userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" ) // credentialsFile is the unmarshalled representation of a credentials file. type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey + Type string `json:"type"` // Service Account fields ClientEmail string `json:"client_email"` PrivateKeyID string `json:"private_key_id"` PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` TokenURL string `json:"token_uri"` ProjectID string `json:"project_id"` @@ -111,15 +114,25 @@ type credentialsFile struct { ClientSecret string `json:"client_secret"` ClientID string `json:"client_id"` RefreshToken string `json:"refresh_token"` + + // External Account fields + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + TokenURLExternal string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + CredentialSource externalaccount.CredentialSource `json:"credential_source"` + QuotaProjectID string `json:"quota_project_id"` } -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { +func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, PrivateKey: []byte(f.PrivateKey), PrivateKeyID: f.PrivateKeyID, Scopes: scopes, TokenURL: f.TokenURL, + Subject: subject, // This is the user email to impersonate } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -127,20 +140,44 @@ func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { return cfg } -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { +func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { switch f.Type { case serviceAccountKey: - cfg := f.jwtConfig(scopes) + cfg := f.jwtConfig(params.Scopes, params.Subject) return cfg.TokenSource(ctx), nil case userCredentialsKey: cfg := &oauth2.Config{ ClientID: f.ClientID, ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, + Scopes: params.Scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: f.AuthURL, + TokenURL: f.TokenURL, + AuthStyle: oauth2.AuthStyleInParams, + }, + } + if cfg.Endpoint.AuthURL == "" { + cfg.Endpoint.AuthURL = Endpoint.AuthURL + } + if cfg.Endpoint.TokenURL == "" { + cfg.Endpoint.TokenURL = Endpoint.TokenURL } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil + case externalAccountKey: + cfg := &externalaccount.Config{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) case "": return nil, errors.New("missing 'type' field in credentials") default: diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go new file mode 100644 index 000000000..a5a5423c6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -0,0 +1,470 @@ +// 
Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type awsSecurityCredentials struct { + AccessKeyID string `json:"AccessKeyID"` + SecretAccessKey string `json:"SecretAccessKey"` + SecurityToken string `json:"Token"` +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials awsSecurityCredentials +} + +// getenv aliases os.Getenv for testing +var getenv = os.Getenv + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" +) + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. + var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) 
+ } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +// SignRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. +func (rs *awsRequestSigner) SignRequest(req *http.Request) error { + signedRequest := cloneRequest(req) + timestamp := now() + + signedRequest.Header.Add("host", requestHost(req)) + + if rs.AwsSecurityCredentials.SecurityToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) + } + + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) + + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) + + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type awsCredentialSource struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + 
CredVerificationURL string + TargetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { + if cs.client == nil { + cs.client = oauth2.NewClient(cs.ctx, nil) + } + return cs.client.Do(req.WithContext(cs.ctx)) +} + +func (cs awsCredentialSource) subjectToken() (string, error) { + if cs.requestSigner == nil { + awsSecurityCredentials, err := cs.getSecurityCredentials() + if err != nil { + return "", err + } + + if cs.region, err = cs.getRegion(); err != nil { + return "", err + } + + cs.requestSigner = &awsRequestSigner{ + RegionName: cs.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if cs.TargetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + } + cs.requestSigner.SignRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (cs *awsCredentialSource) getRegion() (string, error) { + if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } + if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } + + if cs.RegionURL == "" { + return "", errors.New("oauth2/google: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.RegionURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. + respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { + if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { + if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { + return awsSecurityCredentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SecurityToken: getenv("AWS_SESSION_TOKEN"), + }, nil + } + } + + roleName, err := cs.getMetadataRoleName() + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google: missing SecretAccessKey credential") + } + + return credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { + var result awsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + if err != nil { + return result, err + } + req.Header.Add("Content-Type", "application/json") + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { + if cs.CredVerificationURL == "" { 
+ return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) + } + + return string(respBody), nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go new file mode 100644 index 000000000..dab917f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -0,0 +1,246 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" +) + +// now aliases time.Now for testing +var now = func() time.Time { + return time.Now().UTC() +} + +// Config stores the configuration for fetching tokens with external credentials. +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec + // e.g. `urn:ietf:params:oauth:token-type:jwt`. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. + ServiceAccountImpersonationURL string + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using client_id as username and client_secret as password. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. + CredentialSource CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project which overrides the project associated with the credentials. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. + Scopes []string +} + +// Each element consists of a list of patterns. validateURLs checks for matches +// that include all elements in a given list, in that order. 
+ +var ( + validTokenURLPatterns = []*regexp.Regexp{ + // The complicated part in the middle matches any number of characters that + // aren't period, spaces, or slashes. + regexp.MustCompile(`(?i)^[^\.\s\/\\]+\.sts\.googleapis\.com$`), + regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), + regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), + regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), + } + validImpersonateURLPatterns = []*regexp.Regexp{ + regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), + regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), + regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), + regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), + } +) + +func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { + parsed, err := url.Parse(input) + if err != nil { + return false + } + if !strings.EqualFold(parsed.Scheme, scheme) { + return false + } + toTest := parsed.Host + + for _, pattern := range patterns { + + if valid := pattern.MatchString(toTest); valid { + return true + } + } + return false +} + +// TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + return c.tokenSource(ctx, validTokenURLPatterns, validImpersonateURLPatterns, "https") +} + +// tokenSource is a private function that's directly called by some of the tests, +// because the unit test URLs are mocked, and would otherwise fail the +// validity check. +func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Regexp, impersonateURLValidPats []*regexp.Regexp, scheme string) (oauth2.TokenSource, error) { + valid := validateURL(c.TokenURL, tokenURLValidPats, scheme) + if !valid { + return nil, fmt.Errorf("oauth2/google: invalid TokenURL provided while constructing tokenSource") + } + + if c.ServiceAccountImpersonationURL != "" { + valid := validateURL(c.ServiceAccountImpersonationURL, impersonateURLValidPats, scheme) + if !valid { + return nil, fmt.Errorf("oauth2/google: invalid ServiceAccountImpersonationURL provided while constructing tokenSource") + } + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts), nil + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonateTokenSource{ + ctx: ctx, + url: c.ServiceAccountImpersonationURL, + scopes: scopes, + ts: oauth2.ReuseTokenSource(nil, ts), + } + return oauth2.ReuseTokenSource(nil, imp), nil +} + +// Subject token file types. +const ( + fileTypeText = "text" + fileTypeJSON = "json" +) + +type format struct { + // Type is either "text" or "json". When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// Either the File or the URL field should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. 
+type CredentialSource struct { + File string `json:"file"` + + URL string `json:"url"` + Headers map[string]string `json:"headers"` + + EnvironmentID string `json:"environment_id"` + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + Format format `json:"format"` +} + +// parse determines the type of CredentialSource needed +func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) + } + return awsCredentialSource{ + EnvironmentID: c.CredentialSource.EnvironmentID, + RegionURL: c.CredentialSource.RegionURL, + RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: c.CredentialSource.URL, + TargetResource: c.Audience, + ctx: ctx, + }, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } + return nil, fmt.Errorf("oauth2/google: unable to parse credential source") +} + +type baseCredentialSource interface { + subjectToken() (string, error) +} + +// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token allows tokenSource to conform to the oauth2.TokenSource interface. 
+func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsTokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + clientAuth := clientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, nil) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + if stsResp.ExpiresIn < 0 { + return nil, fmt.Errorf("oauth2/google: got invalid expiry from security token service") + } else if stsResp.ExpiresIn >= 0 { + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + } + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go new file mode 100644 index 000000000..99987ce29 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "encoding/base64" + "net/http" + "net/url" + + "golang.org/x/oauth2" +) + +// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type clientAuthentication struct { + // AuthStyle can be either basic or request-body + AuthStyle oauth2.AuthStyle + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. +func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + + switch c.AuthStyle { + case oauth2.AuthStyleInHeader: // AuthStyleInHeader corresponds to basic authentication as defined in rfc7617#2 + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + case oauth2.AuthStyleInParams: // AuthStyleInParams corresponds to request-body authentication with ClientID and ClientSecret in the message body. 
+ values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + case oauth2.AuthStyleAutoDetect: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go new file mode 100644 index 000000000..233a78cef --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "fmt" + +// Error for handling OAuth related error responses as stated in rfc6749#5.2. +type Error struct { + Code string + URI string + Description string +} + +func (err *Error) Error() string { + return fmt.Sprintf("got error code %s from %s: %s", err.Code, err.URI, err.Description) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go new file mode 100644 index 000000000..e953ddb47 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" +) + +type fileCredentialSource struct { + File string + Format format +} + +func (cs fileCredentialSource) subjectToken() (string, error) { + tokenFile, err := os.Open(cs.File) + if err != nil { + return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File) + } + defer tokenFile.Close() + tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go new file mode 100644 index 000000000..64edb5600 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -0,0 +1,84 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonateTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + url string + scopes []string +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (its impersonateTokenSource) Token() (*oauth2.Token, error) { + reqBody := generateAccessTokenReq{ + Lifetime: "3600s", + Scope: its.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) + } + client := oauth2.NewClient(its.ctx, its.ts) + req, err := http.NewRequest("POST", its.url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) + } + req = req.WithContext(its.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + TokenType: "Bearer", + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go new file mode 100644 index 000000000..e6fcae5fc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go @@ -0,0 +1,107 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2" +) + +// exchangeToken performs an oauth2 token exchange with the provided endpoint. +// The first 4 fields are all mandatory. headers can be used to pass additional +// headers beyond the bare minimum required by the token exchange. options can +// be used to pass additional JSON-structured options to the remote server. 
+func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { + + client := oauth2.NewClient(ctx, nil) + + data := url.Values{} + data.Set("audience", request.Audience) + data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") + data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token") + data.Set("subject_token_type", request.SubjectTokenType) + data.Set("subject_token", request.SubjectToken) + data.Set("scope", strings.Join(request.Scope, " ")) + if options != nil { + opts, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + } + data.Set("options", string(opts)) + } + + authentication.InjectAuthentication(data, headers) + encodedData := data.Encode() + + req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) + + } + req = req.WithContext(ctx) + for key, list := range headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Add("Content-Length", strconv.Itoa(len(encodedData))) + + resp, err := client.Do(req) + + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid response from Secure Token Server: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, err + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + var stsResp stsTokenExchangeResponse + err = json.Unmarshal(body, &stsResp) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) + + } + + return &stsResp, nil +} + +// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type stsTokenExchangeRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. +type stsTokenExchangeResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go new file mode 100644 index 000000000..16dca6541 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -0,0 +1,75 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "golang.org/x/oauth2" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format format + ctx context.Context +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google: improperly formatted subject token") + } + return token, nil + case "text": + return string(respBody), nil + case "": + return string(respBody), nil + default: + return "", errors.New("oauth2/google: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index b0fdb3a88..67d97b990 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -7,6 +7,7 @@ package google import ( "crypto/rsa" "fmt" + "strings" "time" "golang.org/x/oauth2" @@ -24,6 +25,28 @@ import ( // optimization supported by a few Google services. // Unless you know otherwise, you should use JWTConfigFromJSON instead. func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + cfg, err := JWTConfigFromJSON(jsonKey) if err != nil { return nil, fmt.Errorf("google: could not parse JSON key: %v", err) @@ -35,6 +58,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token ts := &jwtAccessTokenSource{ email: cfg.Email, audience: audience, + scopes: scopes, pk: pk, pkID: cfg.PrivateKeyID, } @@ -47,6 +71,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token type jwtAccessTokenSource struct { email, audience string + scopes []string pk *rsa.PrivateKey pkID string } @@ -54,12 +79,14 @@ type jwtAccessTokenSource struct { func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { iat := time.Now() exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), } hdr := &jws.Header{ Algorithm: "RS256", diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index 743487188..e1755d1d9 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index abbec2d44..b56886f26 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -56,6 +56,7 @@ var X86 struct { HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes HasERMS bool // Enhanced REP for MOVSB and STOSB HasFMA bool // Fused-multiply-add instructions HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. 
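For reference, a minimal sketch of how the JWTAccessTokenSourceWithScope helper added in golang.org/x/oauth2/google/jwt.go above might be called. This is not part of the vendored change; the key file path and the cloud-platform scope are placeholder values.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// Placeholder path to a service account JSON key.
	jsonKey, err := ioutil.ReadFile("service-account.json")
	if err != nil {
		log.Fatal(err)
	}
	// Self-signed JWT token source bound to a scope rather than an audience.
	ts, err := google.JWTAccessTokenSourceWithScope(jsonKey,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.TokenType, tok.Expiry)
}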
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 54ca4667f..5ea287b7e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -39,6 +39,7 @@ func initOptions() { {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, {Name: "erms", Feature: &X86.HasERMS}, {Name: "fma", Feature: &X86.HasFMA}, {Name: "osxsave", Feature: &X86.HasOSXSAVE}, @@ -73,6 +74,7 @@ func archInit() { X86.HasPCLMULQDQ = isSet(1, ecx1) X86.HasSSSE3 = isSet(9, ecx1) X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) X86.HasSSE41 = isSet(19, ecx1) X86.HasSSE42 = isSet(20, ecx1) X86.HasPOPCNT = isSet(23, ecx1) diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go new file mode 100644 index 000000000..934af313c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -0,0 +1,149 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package unix + +import ( + "bytes" + "unsafe" +) + +// Helpers for dealing with ifreq since it contains a union and thus requires a +// lot of unsafe.Pointer casts to use properly. + +// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq +// contains an interface name and a union of arbitrary data which can be +// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq +// function. +// +// Use the Name method to access the stored interface name. The union data +// fields can be get and set using the following methods: +// - Uint16/SetUint16: flags +// - Uint32/SetUint32: ifindex, metric, mtu +type Ifreq struct{ raw ifreq } + +// NewIfreq creates an Ifreq with the input network interface name after +// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required) +// bytes. +func NewIfreq(name string) (*Ifreq, error) { + // Leave room for terminating NULL byte. + if len(name) >= IFNAMSIZ { + return nil, EINVAL + } + + var ifr ifreq + copy(ifr.Ifrn[:], name) + + return &Ifreq{raw: ifr}, nil +} + +// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc. + +// Name returns the interface name associated with the Ifreq. +func (ifr *Ifreq) Name() string { + // BytePtrToString requires a NULL terminator or the program may crash. If + // one is not present, just return the empty string. + if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { + return "" + } + + return BytePtrToString(&ifr.raw.Ifrn[0]) +} + +// According to netdevice(7), only AF_INET addresses are returned for numerous +// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port +// field and other data is always empty. + +// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C +// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not +// AF_INET, an error is returned. +func (ifr *Ifreq) Inet4Addr() ([]byte, error) { + raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0])) + if raw.Family != AF_INET { + // Cannot safely interpret raw.Addr bytes as an IPv4 address. + return nil, EINVAL + } + + return raw.Addr[:], nil +} + +// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an +// embedded sockaddr within the Ifreq's union data. 
v must be 4 bytes in length +// or an error will be returned. +func (ifr *Ifreq) SetInet4Addr(v []byte) error { + if len(v) != 4 { + return EINVAL + } + + var addr [4]byte + copy(addr[:], v) + + ifr.clear() + *(*RawSockaddrInet4)( + unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]), + ) = RawSockaddrInet4{ + // Always set IP family as ioctls would require it anyway. + Family: AF_INET, + Addr: addr, + } + + return nil +} + +// Uint16 returns the Ifreq union data as a C short/Go uint16 value. +func (ifr *Ifreq) Uint16() uint16 { + return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) +} + +// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint16(v uint16) { + ifr.clear() + *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v +} + +// Uint32 returns the Ifreq union data as a C int/Go uint32 value. +func (ifr *Ifreq) Uint32() uint32 { + return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) +} + +// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint32(v uint32) { + ifr.clear() + *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v +} + +// clear zeroes the ifreq's union field to prevent trailing garbage data from +// being sent to the kernel if an ifreq is reused. +func (ifr *Ifreq) clear() { + for i := range ifr.raw.Ifru { + ifr.raw.Ifru[i] = 0 + } +} + +// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as +// IoctlGetEthtoolDrvinfo which use these APIs under the hood. + +// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData, +// use the Ifreq.withData method. +type ifreqData struct { + name [IFNAMSIZ]byte + // A type separate from ifreq is required in order to comply with the + // unsafe.Pointer rules since the "pointer-ness" of data would not be + // preserved if it were cast into the byte array of a raw ifreq. + data unsafe.Pointer + // Pad to the same size as ifreq. + _ [len(ifreq{}.Ifru) - SizeofPtr]byte +} + +// withData produces an ifreqData with the pointer p set for ioctls which require +// arbitrary pointer data. 
+func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData { + return ifreqData{ + name: ifr.raw.Ifrn, + data: p, + } +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 48773f730..1dadead21 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -5,7 +5,6 @@ package unix import ( - "runtime" "unsafe" ) @@ -22,56 +21,42 @@ func IoctlRetInt(fd int, req uint) (int, error) { func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime - err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCTime(fd int, value *RTCTime) error { - err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value)) } func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm - err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { - err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -type ifreqEthtool struct { - name [IFNAMSIZ]byte - data unsafe.Pointer + return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value)) } // IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network // device specified by ifname. func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { - // Leave room for terminating NULL byte. - if len(ifname) >= IFNAMSIZ { - return nil, EINVAL + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err } - value := EthtoolDrvinfo{ - Cmd: ETHTOOL_GDRVINFO, - } - ifreq := ifreqEthtool{ - data: unsafe.Pointer(&value), - } - copy(ifreq.name[:], ifname) - err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) - runtime.KeepAlive(ifreq) + value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) return &value, err } @@ -80,7 +65,7 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { var value WatchdogInfo - err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value)) return &value, err } @@ -88,6 +73,7 @@ func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { // more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlWatchdogKeepalive(fd int) error { + // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr. return ioctl(fd, WDIOC_KEEPALIVE, 0) } @@ -95,9 +81,7 @@ func IoctlWatchdogKeepalive(fd int) error { // range of data conveyed in value to the file associated with the file // descriptor destFd. See the ioctl_ficlonerange(2) man page for details. 
func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { - err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value)) } // IoctlFileClone performs an FICLONE ioctl operation to clone the entire file @@ -148,7 +132,7 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { rawinfo.Reserved = value.Info[i].Reserved } - err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0])) // Output for i := range value.Info { @@ -166,31 +150,47 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { } func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { - err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value)) } func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { var value HIDRawDevInfo - err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value)) return &value, err } func IoctlHIDGetRawName(fd int) (string, error) { var value [_HIDIOCGRAWNAME_LEN]byte - err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawPhys(fd int) (string, error) { var value [_HIDIOCGRAWPHYS_LEN]byte - err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawUniq(fd int) (string, error) { var value [_HIDIOCGRAWUNIQ_LEN]byte - err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } + +// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or +// output. See the netdevice(7) man page for details. +func IoctlIfreq(fd int, req uint, value *Ifreq) error { + // It is possible we will add more fields to *Ifreq itself later to prevent + // misuse, so pass the raw *ifreq directly. + return ioctlPtr(fd, req, unsafe.Pointer(&value.raw)) +} + +// TODO(mdlayher): export if and when IfreqData is exported. + +// ioctlIfreqData performs an ioctl using an ifreqData structure for input +// and/or output. See the netdevice(7) man page for details. +func ioctlIfreqData(fd int, req uint, value *ifreqData) error { + // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are + // identical so pass *IfreqData directly. 
+ return ioctlPtr(fd, req, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6e6afcaa1..0bcb8c322 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -217,8 +217,6 @@ struct ltchars { #include #include #include -#include -#include #include #include #include @@ -502,7 +500,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 41b91fdfb..2839435e3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -66,11 +66,18 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { return fchmodat(dirfd, path, mode) } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. -// These are defined in ioctl.go and ioctl_linux.go. +// ioctl itself should not be exposed directly, but additional get/set functions +// for specific types are permissible. These are defined in ioctl.go and +// ioctl_linux.go. +// +// The third argument to ioctl is often a pointer but sometimes an integer. +// Callers should use ioctlPtr when the third argument is a pointer and ioctl +// when the third argument is an integer. +// +// TODO: some existing code incorrectly uses ioctl when it should use ioctlPtr. //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) @@ -1348,6 +1355,13 @@ func SetsockoptTpacketReq3(fd, level, opt int, tp *TpacketReq3) error { return setsockopt(fd, level, opt, unsafe.Pointer(tp), unsafe.Sizeof(*tp)) } +func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { + if len(o) == 0 { + return EINVAL + } + return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. 
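For reference, a minimal sketch of the new Ifreq/IoctlIfreq helpers added in ifreq_linux.go and ioctl_linux.go above, following the ioctl-vs-ioctlPtr convention described in this hunk. It is not part of the vendored change; the interface name is a placeholder.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// A plain datagram socket is enough as the ioctl target for netdevice(7) requests.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// "eth0" is a placeholder interface name.
	ifr, err := unix.NewIfreq("eth0")
	if err != nil {
		log.Fatal(err)
	}
	// SIOCGIFMTU fills the ifreq union with the interface MTU as a C int,
	// read back here with the new Uint32 accessor.
	if err := unix.IoctlIfreq(fd, unix.SIOCGIFMTU, ifr); err != nil {
		log.Fatal(err)
	}
	fmt.Println("mtu:", ifr.Uint32())
}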
@@ -1859,7 +1873,7 @@ func Getpgrp() (pid int) { //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index b430536c8..91317d749 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -105,7 +105,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -133,7 +133,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 39a864d4e..b961a620e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -184,7 +184,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -212,7 +212,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 7f27ebf2f..4b977ba44 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -171,7 +171,7 @@ func Pipe2(p []int, flags int) (err error) { // Getrlimit prefers the prlimit64 system call. See issue 38604. func Getrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, nil, rlim) + err := Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -180,7 +180,7 @@ func Getrlimit(resource int, rlim *Rlimit) error { // Setrlimit prefers the prlimit64 system call. See issue 38604. 
func Setrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, rlim, nil) + err := Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 3a5621e37..21d74e2fb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -157,7 +157,7 @@ type rlimit32 struct { //sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -185,7 +185,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index cf0d36f76..6f1fc581e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,8 +3,7 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux -// +build ppc +// +build linux,ppc package unix @@ -143,7 +142,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -171,7 +170,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 77fcde7c1..d2a6495c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,7 +13,10 @@ package unix import ( + "fmt" + "os" "runtime" + "sync" "syscall" "unsafe" ) @@ -744,3 +747,240 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } + +// Event Ports + +type fileObjCookie struct { + fobj *fileObj + cookie interface{} +} + +// EventPort provides a safe abstraction on top of Solaris/illumos Event Ports. +type EventPort struct { + port int + mu sync.Mutex + fds map[uintptr]interface{} + paths map[string]*fileObjCookie +} + +// PortEvent is an abstraction of the port_event C struct. +// Compare Source against PORT_SOURCE_FILE or PORT_SOURCE_FD +// to see if Path or Fd was the event source. The other will be +// uninitialized. +type PortEvent struct { + Cookie interface{} + Events int32 + Fd uintptr + Path string + Source uint16 + fobj *fileObj +} + +// NewEventPort creates a new EventPort including the +// underlying call to port_create(3c). 
+func NewEventPort() (*EventPort, error) { + port, err := port_create() + if err != nil { + return nil, err + } + e := &EventPort{ + port: port, + fds: make(map[uintptr]interface{}), + paths: make(map[string]*fileObjCookie), + } + return e, nil +} + +//sys port_create() (n int, err error) +//sys port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) +//sys port_dissociate(port int, source int, object uintptr) (n int, err error) +//sys port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) +//sys port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) + +// Close closes the event port. +func (e *EventPort) Close() error { + e.mu.Lock() + defer e.mu.Unlock() + e.fds = nil + e.paths = nil + return Close(e.port) +} + +// PathIsWatched checks to see if path is associated with this EventPort. +func (e *EventPort) PathIsWatched(path string) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.paths[path] + return found +} + +// FdIsWatched checks to see if fd is associated with this EventPort. +func (e *EventPort) FdIsWatched(fd uintptr) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.fds[fd] + return found +} + +// AssociatePath wraps port_associate(3c) for a filesystem path including +// creating the necessary file_obj from the provided stat information. +func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.paths[path]; found { + return fmt.Errorf("%v is already associated with this Event Port", path) + } + fobj, err := createFileObj(path, stat) + if err != nil { + return err + } + fCookie := &fileObjCookie{fobj, cookie} + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + if err != nil { + return err + } + e.paths[path] = fCookie + return nil +} + +// DissociatePath wraps port_dissociate(3c) for a filesystem path. +func (e *EventPort) DissociatePath(path string) error { + e.mu.Lock() + defer e.mu.Unlock() + f, ok := e.paths[path] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", path) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(f.fobj))) + if err != nil { + return err + } + delete(e.paths, path) + return nil +} + +// AssociateFd wraps calls to port_associate(3c) on file descriptors. +func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.fds[fd]; found { + return fmt.Errorf("%v is already associated with this Event Port", fd) + } + pcookie := &cookie + _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(pcookie))) + if err != nil { + return err + } + e.fds[fd] = pcookie + return nil +} + +// DissociateFd wraps calls to port_dissociate(3c) on file descriptors. 
+func (e *EventPort) DissociateFd(fd uintptr) error { + e.mu.Lock() + defer e.mu.Unlock() + _, ok := e.fds[fd] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", fd) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FD, fd) + if err != nil { + return err + } + delete(e.fds, fd) + return nil +} + +func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { + fobj := new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fobj.Atim.Sec = s.Atim.Sec + fobj.Atim.Nsec = s.Atim.Nsec + fobj.Mtim.Sec = s.Mtim.Sec + fobj.Mtim.Nsec = s.Mtim.Nsec + fobj.Ctim.Sec = s.Ctim.Sec + fobj.Ctim.Nsec = s.Ctim.Nsec + return fobj, nil +} + +// GetOne wraps port_get(3c) and returns a single PortEvent. +func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { + pe := new(portEvent) + _, err := port_get(e.port, pe, t) + if err != nil { + return nil, err + } + p := new(PortEvent) + p.Events = pe.Events + p.Source = pe.Source + e.mu.Lock() + defer e.mu.Unlock() + switch pe.Source { + case PORT_SOURCE_FD: + p.Fd = uintptr(pe.Object) + cookie := (*interface{})(unsafe.Pointer(pe.User)) + p.Cookie = *cookie + delete(e.fds, p.Fd) + case PORT_SOURCE_FILE: + p.fobj = (*fileObj)(unsafe.Pointer(uintptr(pe.Object))) + p.Path = BytePtrToString((*byte)(unsafe.Pointer(p.fobj.Name))) + cookie := (*interface{})(unsafe.Pointer(pe.User)) + p.Cookie = *cookie + delete(e.paths, p.Path) + } + return p, nil +} + +// Pending wraps port_getn(3c) and returns how many events are pending. +func (e *EventPort) Pending() (int, error) { + var n uint32 = 0 + _, err := port_getn(e.port, nil, 0, &n, nil) + return int(n), err +} + +// Get wraps port_getn(3c) and fills a slice of PortEvent. +// It will block until either min events have been received +// or the timeout has been exceeded. It will return how many +// events were actually received along with any error information. +func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) { + if min == 0 { + return 0, fmt.Errorf("need to request at least one event or use Pending() instead") + } + if len(s) < min { + return 0, fmt.Errorf("len(s) (%d) is less than min events requested (%d)", len(s), min) + } + got := uint32(min) + max := uint32(len(s)) + var err error + ps := make([]portEvent, max, max) + _, err = port_getn(e.port, &ps[0], max, &got, timeout) + // got will be trustworthy with ETIME, but not any other error. 
+ if err != nil && err != ETIME { + return 0, err + } + e.mu.Lock() + defer e.mu.Unlock() + for i := 0; i < int(got); i++ { + s[i].Events = ps[i].Events + s[i].Source = ps[i].Source + switch ps[i].Source { + case PORT_SOURCE_FD: + s[i].Fd = uintptr(ps[i].Object) + cookie := (*interface{})(unsafe.Pointer(ps[i].User)) + s[i].Cookie = *cookie + delete(e.fds, s[i].Fd) + case PORT_SOURCE_FILE: + s[i].fobj = (*fileObj)(unsafe.Pointer(uintptr(ps[i].Object))) + s[i].Path = BytePtrToString((*byte)(unsafe.Pointer(s[i].fobj.Name))) + cookie := (*interface{})(unsafe.Pointer(ps[i].User)) + s[i].Cookie = *cookie + delete(e.paths, s[i].Path) + } + } + return int(got), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index a7618ceb5..cf296a243 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -313,6 +313,10 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +func Send(s int, buf []byte, flags int) (err error) { + return sendto(s, buf, flags, nil, 0) +} + func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { ptr, n, err := to.sockaddr() if err != nil { diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 5bb48ef54..a3a45fec5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1206,6 +1206,7 @@ const ( RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 11e570979..31009d7f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1206,6 +1206,7 @@ const ( RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 RTF_GATEWAY = 0x2 + RTF_GLOBAL = 0x40000000 RTF_HOST = 0x4 RTF_IFREF = 0x4000000 RTF_IFSCOPE = 0x1000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 52f5bbc14..8894c4af4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -228,7 +228,11 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_BTF_ID = 0x3 BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_FUNC = 0x4 + BPF_PSEUDO_KFUNC_CALL = 0x2 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_IDX = 0x5 + BPF_PSEUDO_MAP_IDX_VALUE = 0x6 BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 @@ -475,6 +479,8 @@ const ( DM_LIST_VERSIONS = 0xc138fd0d DM_MAX_TYPE_NAME = 0x10 DM_NAME_LEN = 0x80 + DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID = 0x2 + DM_NAME_LIST_FLAG_HAS_UUID = 0x1 DM_NOFLUSH_FLAG = 0x800 DM_PERSISTENT_DEV_FLAG = 0x8 DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000 @@ -494,9 +500,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-02-01)" + DM_VERSION_EXTRA = "-ioctl (2021-03-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2c + DM_VERSION_MINOR = 0x2d DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -981,12 +987,6 @@ const ( HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 - ICMPV6_FILTER = 0x1 - ICMPV6_FILTER_BLOCK = 0x1 - ICMPV6_FILTER_BLOCKOTHERS = 0x3 - ICMPV6_FILTER_PASS = 0x2 - ICMPV6_FILTER_PASSONLY = 0x4 - ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ 
-1257,6 +1257,7 @@ const ( KEXEC_ARCH_PARISC = 0xf0000 KEXEC_ARCH_PPC = 0x140000 KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_RISCV = 0xf30000 KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 @@ -1636,11 +1637,12 @@ const ( NFNL_MSG_BATCH_END = 0x11 NFNL_NFA_NEST = 0x8000 NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_COUNT = 0xd NFNL_SUBSYS_CTHELPER = 0x9 NFNL_SUBSYS_CTNETLINK = 0x1 NFNL_SUBSYS_CTNETLINK_EXP = 0x2 NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_HOOK = 0xc NFNL_SUBSYS_IPSET = 0x6 NFNL_SUBSYS_NFTABLES = 0xa NFNL_SUBSYS_NFT_COMPAT = 0xb @@ -1756,14 +1758,19 @@ const ( PERF_ATTR_SIZE_VER4 = 0x68 PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 + PERF_ATTR_SIZE_VER7 = 0x80 PERF_AUX_FLAG_COLLISION = 0x8 + PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 + PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 PERF_AUX_FLAG_OVERWRITE = 0x2 PERF_AUX_FLAG_PARTIAL = 0x4 + PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_HW_EVENT_MASK = 0xffffffff PERF_MAX_CONTEXTS_PER_STACK = 0x8 PERF_MAX_STACK_DEPTH = 0x7f PERF_MEM_BLK_ADDR = 0x4 @@ -1822,6 +1829,7 @@ const ( PERF_MEM_TLB_OS = 0x40 PERF_MEM_TLB_SHIFT = 0x1a PERF_MEM_TLB_WK = 0x20 + PERF_PMU_TYPE_SHIFT = 0x20 PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER = 0x1 PERF_RECORD_MISC_COMM_EXEC = 0x2000 PERF_RECORD_MISC_CPUMODE_MASK = 0x7 @@ -1921,7 +1929,15 @@ const ( PR_PAC_APGAKEY = 0x10 PR_PAC_APIAKEY = 0x1 PR_PAC_APIBKEY = 0x2 + PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 + PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_SCHED_CORE = 0x3e + PR_SCHED_CORE_CREATE = 0x1 + PR_SCHED_CORE_GET = 0x0 + PR_SCHED_CORE_MAX = 0x4 + PR_SCHED_CORE_SHARE_FROM = 0x3 + PR_SCHED_CORE_SHARE_TO = 0x2 PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -2003,6 +2019,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -2163,6 +2180,7 @@ const ( RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 + RTM_DELNEXTHOPBUCKET = 0x75 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -2193,6 +2211,7 @@ const ( RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 RTM_GETNEXTHOP = 0x6a + RTM_GETNEXTHOPBUCKET = 0x76 RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -2201,7 +2220,7 @@ const ( RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e RTM_GETVLAN = 0x72 - RTM_MAX = 0x73 + RTM_MAX = 0x77 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2215,6 +2234,7 @@ const ( RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 RTM_NEWNEXTHOP = 0x68 + RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 @@ -2224,8 +2244,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x19 - RTM_NR_MSGTYPES = 0x64 + RTM_NR_FAMILIES = 0x1a + RTM_NR_MSGTYPES = 0x68 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2253,6 +2273,7 @@ const ( RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 @@ -2283,6 +2304,7 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 SEEK_DATA = 0x3 @@ -2536,6 +2558,14 @@ const ( TCOFLUSH = 0x1 TCOOFF = 
0x0 TCOON = 0x1 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 09fc559ed..697811a46 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -308,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 75730cc22..7d8d93bfc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -309,6 +310,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 127cf17ad..f707d5089 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -315,6 +316,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 957ca1ff1..3a67a9c85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -148,6 +148,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -305,6 +306,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 314a2054f..a7ccef56c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 457e8de97..f7b7cec91 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 33cd28f6b..4fcacf958 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 0e085ba14..6f6c223a2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -308,6 +309,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1b5928cff..59e522bcf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -363,6 +364,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f3a41d6ec..d4264a0f7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -367,6 +368,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 6a5a555d5..21cbec1dd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -367,6 +368,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index a4da67edb..9b05bf12f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -296,6 +297,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index a7028e0ef..bd82ace09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 @@ -371,6 +372,7 @@ const ( SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ed3b3286c..1f8bded56 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -150,6 +150,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 @@ -362,6 +363,7 @@ const ( SO_MARK = 0x22 SO_MAX_PACING_RATE = 0x31 SO_MEMINFO = 0x39 + SO_NETNS_COOKIE = 0x50 SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 593cc0fef..6d56edc05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index a4e4c2231..aef6c0856 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7305cc915..2dbe3da7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -48,6 +48,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Linkat(olddirfd int, 
oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -1201,7 +1211,7 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { +func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4e18d5c99..b5f926cee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,11 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_port_create port_create "libc.so" +//go:cgo_import_dynamic libc_port_associate port_associate "libc.so" +//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" +//go:cgo_import_dynamic libc_port_get port_get "libc.so" +//go:cgo_import_dynamic libc_port_getn port_getn "libc.so" //go:linkname procpipe libc_pipe //go:linkname procpipe2 libc_pipe2 @@ -272,6 +277,11 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procport_create libc_port_create +//go:linkname procport_associate libc_port_associate +//go:linkname procport_dissociate libc_port_dissociate +//go:linkname procport_get libc_port_get +//go:linkname procport_getn libc_port_getn var ( procpipe, @@ -403,7 +413,12 @@ var ( proc__xnet_getsockopt, procgetpeername, procsetsockopt, - procrecvfrom syscallFunc + procrecvfrom, + procport_create, + procport_associate, + procport_dissociate, + procport_get, + procport_getn syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1981,3 +1996,58 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_create() (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_dissociate(port int, source int, object uintptr) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { + r0, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fbc59b7fd..aa7ce85d1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -439,4 +439,9 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 04d16d771..b83032638 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -7,358 +7,363 @@ package unix const ( - SYS_READ = 0 - SYS_WRITE = 1 - SYS_OPEN = 2 - SYS_CLOSE = 3 - SYS_STAT = 4 - SYS_FSTAT = 5 - SYS_LSTAT = 6 - SYS_POLL = 7 - SYS_LSEEK = 8 - SYS_MMAP = 9 - SYS_MPROTECT = 10 - SYS_MUNMAP = 11 - SYS_BRK = 12 - SYS_RT_SIGACTION = 13 - SYS_RT_SIGPROCMASK = 14 - SYS_RT_SIGRETURN = 15 - SYS_IOCTL = 16 - SYS_PREAD64 = 17 - SYS_PWRITE64 = 18 - SYS_READV = 19 - SYS_WRITEV = 20 - SYS_ACCESS = 21 - SYS_PIPE = 22 - SYS_SELECT = 23 - SYS_SCHED_YIELD = 24 - SYS_MREMAP = 25 - SYS_MSYNC = 26 - SYS_MINCORE = 27 - SYS_MADVISE = 28 - SYS_SHMGET = 29 - SYS_SHMAT = 30 - SYS_SHMCTL = 31 - SYS_DUP = 32 - SYS_DUP2 = 33 - SYS_PAUSE = 34 - SYS_NANOSLEEP = 35 - SYS_GETITIMER = 36 - SYS_ALARM = 37 - SYS_SETITIMER = 38 - SYS_GETPID = 39 - SYS_SENDFILE = 40 - SYS_SOCKET = 41 - SYS_CONNECT = 42 - SYS_ACCEPT = 43 - SYS_SENDTO = 44 - SYS_RECVFROM = 45 - SYS_SENDMSG = 46 - SYS_RECVMSG = 47 - SYS_SHUTDOWN = 48 - SYS_BIND = 49 - SYS_LISTEN = 50 - SYS_GETSOCKNAME = 51 - SYS_GETPEERNAME = 52 - SYS_SOCKETPAIR = 53 - SYS_SETSOCKOPT = 54 - SYS_GETSOCKOPT = 55 - SYS_CLONE = 56 - SYS_FORK = 57 - SYS_VFORK = 58 - SYS_EXECVE = 59 - SYS_EXIT = 60 - SYS_WAIT4 = 61 - SYS_KILL = 62 - SYS_UNAME = 63 - SYS_SEMGET = 64 - SYS_SEMOP = 65 - SYS_SEMCTL = 66 - SYS_SHMDT = 67 - SYS_MSGGET = 68 - SYS_MSGSND = 69 - SYS_MSGRCV = 70 - SYS_MSGCTL = 71 - SYS_FCNTL = 72 - SYS_FLOCK = 73 - SYS_FSYNC = 74 - SYS_FDATASYNC = 75 - SYS_TRUNCATE = 76 - SYS_FTRUNCATE = 77 - SYS_GETDENTS = 78 - SYS_GETCWD = 79 - SYS_CHDIR = 80 - SYS_FCHDIR = 81 - SYS_RENAME = 82 - SYS_MKDIR = 83 - SYS_RMDIR = 84 - SYS_CREAT = 85 - SYS_LINK = 86 - SYS_UNLINK = 87 - SYS_SYMLINK = 88 - SYS_READLINK = 89 - SYS_CHMOD = 90 - SYS_FCHMOD = 91 - SYS_CHOWN = 92 - SYS_FCHOWN = 93 - SYS_LCHOWN = 94 - SYS_UMASK = 95 - SYS_GETTIMEOFDAY = 96 - SYS_GETRLIMIT = 97 - SYS_GETRUSAGE = 98 - SYS_SYSINFO = 99 - SYS_TIMES = 100 - SYS_PTRACE = 101 - SYS_GETUID = 102 - SYS_SYSLOG = 103 - SYS_GETGID = 104 - SYS_SETUID = 105 - SYS_SETGID = 106 - SYS_GETEUID = 107 - SYS_GETEGID = 108 - SYS_SETPGID = 109 - SYS_GETPPID = 110 - SYS_GETPGRP = 111 - SYS_SETSID = 112 - SYS_SETREUID = 113 - 
SYS_SETREGID = 114 - SYS_GETGROUPS = 115 - SYS_SETGROUPS = 116 - SYS_SETRESUID = 117 - SYS_GETRESUID = 118 - SYS_SETRESGID = 119 - SYS_GETRESGID = 120 - SYS_GETPGID = 121 - SYS_SETFSUID = 122 - SYS_SETFSGID = 123 - SYS_GETSID = 124 - SYS_CAPGET = 125 - SYS_CAPSET = 126 - SYS_RT_SIGPENDING = 127 - SYS_RT_SIGTIMEDWAIT = 128 - SYS_RT_SIGQUEUEINFO = 129 - SYS_RT_SIGSUSPEND = 130 - SYS_SIGALTSTACK = 131 - SYS_UTIME = 132 - SYS_MKNOD = 133 - SYS_USELIB = 134 - SYS_PERSONALITY = 135 - SYS_USTAT = 136 - SYS_STATFS = 137 - SYS_FSTATFS = 138 - SYS_SYSFS = 139 - SYS_GETPRIORITY = 140 - SYS_SETPRIORITY = 141 - SYS_SCHED_SETPARAM = 142 - SYS_SCHED_GETPARAM = 143 - SYS_SCHED_SETSCHEDULER = 144 - SYS_SCHED_GETSCHEDULER = 145 - SYS_SCHED_GET_PRIORITY_MAX = 146 - SYS_SCHED_GET_PRIORITY_MIN = 147 - SYS_SCHED_RR_GET_INTERVAL = 148 - SYS_MLOCK = 149 - SYS_MUNLOCK = 150 - SYS_MLOCKALL = 151 - SYS_MUNLOCKALL = 152 - SYS_VHANGUP = 153 - SYS_MODIFY_LDT = 154 - SYS_PIVOT_ROOT = 155 - SYS__SYSCTL = 156 - SYS_PRCTL = 157 - SYS_ARCH_PRCTL = 158 - SYS_ADJTIMEX = 159 - SYS_SETRLIMIT = 160 - SYS_CHROOT = 161 - SYS_SYNC = 162 - SYS_ACCT = 163 - SYS_SETTIMEOFDAY = 164 - SYS_MOUNT = 165 - SYS_UMOUNT2 = 166 - SYS_SWAPON = 167 - SYS_SWAPOFF = 168 - SYS_REBOOT = 169 - SYS_SETHOSTNAME = 170 - SYS_SETDOMAINNAME = 171 - SYS_IOPL = 172 - SYS_IOPERM = 173 - SYS_CREATE_MODULE = 174 - SYS_INIT_MODULE = 175 - SYS_DELETE_MODULE = 176 - SYS_GET_KERNEL_SYMS = 177 - SYS_QUERY_MODULE = 178 - SYS_QUOTACTL = 179 - SYS_NFSSERVCTL = 180 - SYS_GETPMSG = 181 - SYS_PUTPMSG = 182 - SYS_AFS_SYSCALL = 183 - SYS_TUXCALL = 184 - SYS_SECURITY = 185 - SYS_GETTID = 186 - SYS_READAHEAD = 187 - SYS_SETXATTR = 188 - SYS_LSETXATTR = 189 - SYS_FSETXATTR = 190 - SYS_GETXATTR = 191 - SYS_LGETXATTR = 192 - SYS_FGETXATTR = 193 - SYS_LISTXATTR = 194 - SYS_LLISTXATTR = 195 - SYS_FLISTXATTR = 196 - SYS_REMOVEXATTR = 197 - SYS_LREMOVEXATTR = 198 - SYS_FREMOVEXATTR = 199 - SYS_TKILL = 200 - SYS_TIME = 201 - SYS_FUTEX = 202 - SYS_SCHED_SETAFFINITY = 203 - SYS_SCHED_GETAFFINITY = 204 - SYS_SET_THREAD_AREA = 205 - SYS_IO_SETUP = 206 - SYS_IO_DESTROY = 207 - SYS_IO_GETEVENTS = 208 - SYS_IO_SUBMIT = 209 - SYS_IO_CANCEL = 210 - SYS_GET_THREAD_AREA = 211 - SYS_LOOKUP_DCOOKIE = 212 - SYS_EPOLL_CREATE = 213 - SYS_EPOLL_CTL_OLD = 214 - SYS_EPOLL_WAIT_OLD = 215 - SYS_REMAP_FILE_PAGES = 216 - SYS_GETDENTS64 = 217 - SYS_SET_TID_ADDRESS = 218 - SYS_RESTART_SYSCALL = 219 - SYS_SEMTIMEDOP = 220 - SYS_FADVISE64 = 221 - SYS_TIMER_CREATE = 222 - SYS_TIMER_SETTIME = 223 - SYS_TIMER_GETTIME = 224 - SYS_TIMER_GETOVERRUN = 225 - SYS_TIMER_DELETE = 226 - SYS_CLOCK_SETTIME = 227 - SYS_CLOCK_GETTIME = 228 - SYS_CLOCK_GETRES = 229 - SYS_CLOCK_NANOSLEEP = 230 - SYS_EXIT_GROUP = 231 - SYS_EPOLL_WAIT = 232 - SYS_EPOLL_CTL = 233 - SYS_TGKILL = 234 - SYS_UTIMES = 235 - SYS_VSERVER = 236 - SYS_MBIND = 237 - SYS_SET_MEMPOLICY = 238 - SYS_GET_MEMPOLICY = 239 - SYS_MQ_OPEN = 240 - SYS_MQ_UNLINK = 241 - SYS_MQ_TIMEDSEND = 242 - SYS_MQ_TIMEDRECEIVE = 243 - SYS_MQ_NOTIFY = 244 - SYS_MQ_GETSETATTR = 245 - SYS_KEXEC_LOAD = 246 - SYS_WAITID = 247 - SYS_ADD_KEY = 248 - SYS_REQUEST_KEY = 249 - SYS_KEYCTL = 250 - SYS_IOPRIO_SET = 251 - SYS_IOPRIO_GET = 252 - SYS_INOTIFY_INIT = 253 - SYS_INOTIFY_ADD_WATCH = 254 - SYS_INOTIFY_RM_WATCH = 255 - SYS_MIGRATE_PAGES = 256 - SYS_OPENAT = 257 - SYS_MKDIRAT = 258 - SYS_MKNODAT = 259 - SYS_FCHOWNAT = 260 - SYS_FUTIMESAT = 261 - SYS_NEWFSTATAT = 262 - SYS_UNLINKAT = 263 - SYS_RENAMEAT = 264 - SYS_LINKAT = 265 - SYS_SYMLINKAT = 266 - SYS_READLINKAT = 267 - SYS_FCHMODAT = 268 
- SYS_FACCESSAT = 269 - SYS_PSELECT6 = 270 - SYS_PPOLL = 271 - SYS_UNSHARE = 272 - SYS_SET_ROBUST_LIST = 273 - SYS_GET_ROBUST_LIST = 274 - SYS_SPLICE = 275 - SYS_TEE = 276 - SYS_SYNC_FILE_RANGE = 277 - SYS_VMSPLICE = 278 - SYS_MOVE_PAGES = 279 - SYS_UTIMENSAT = 280 - SYS_EPOLL_PWAIT = 281 - SYS_SIGNALFD = 282 - SYS_TIMERFD_CREATE = 283 - SYS_EVENTFD = 284 - SYS_FALLOCATE = 285 - SYS_TIMERFD_SETTIME = 286 - SYS_TIMERFD_GETTIME = 287 - SYS_ACCEPT4 = 288 - SYS_SIGNALFD4 = 289 - SYS_EVENTFD2 = 290 - SYS_EPOLL_CREATE1 = 291 - SYS_DUP3 = 292 - SYS_PIPE2 = 293 - SYS_INOTIFY_INIT1 = 294 - SYS_PREADV = 295 - SYS_PWRITEV = 296 - SYS_RT_TGSIGQUEUEINFO = 297 - SYS_PERF_EVENT_OPEN = 298 - SYS_RECVMMSG = 299 - SYS_FANOTIFY_INIT = 300 - SYS_FANOTIFY_MARK = 301 - SYS_PRLIMIT64 = 302 - SYS_NAME_TO_HANDLE_AT = 303 - SYS_OPEN_BY_HANDLE_AT = 304 - SYS_CLOCK_ADJTIME = 305 - SYS_SYNCFS = 306 - SYS_SENDMMSG = 307 - SYS_SETNS = 308 - SYS_GETCPU = 309 - SYS_PROCESS_VM_READV = 310 - SYS_PROCESS_VM_WRITEV = 311 - SYS_KCMP = 312 - SYS_FINIT_MODULE = 313 - SYS_SCHED_SETATTR = 314 - SYS_SCHED_GETATTR = 315 - SYS_RENAMEAT2 = 316 - SYS_SECCOMP = 317 - SYS_GETRANDOM = 318 - SYS_MEMFD_CREATE = 319 - SYS_KEXEC_FILE_LOAD = 320 - SYS_BPF = 321 - SYS_EXECVEAT = 322 - SYS_USERFAULTFD = 323 - SYS_MEMBARRIER = 324 - SYS_MLOCK2 = 325 - SYS_COPY_FILE_RANGE = 326 - SYS_PREADV2 = 327 - SYS_PWRITEV2 = 328 - SYS_PKEY_MPROTECT = 329 - SYS_PKEY_ALLOC = 330 - SYS_PKEY_FREE = 331 - SYS_STATX = 332 - SYS_IO_PGETEVENTS = 333 - SYS_RSEQ = 334 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_READ = 0 + SYS_WRITE = 1 + SYS_OPEN = 2 + SYS_CLOSE = 3 + SYS_STAT = 4 + SYS_FSTAT = 5 + SYS_LSTAT = 6 + SYS_POLL = 7 + SYS_LSEEK = 8 + SYS_MMAP = 9 + SYS_MPROTECT = 10 + SYS_MUNMAP = 11 + SYS_BRK = 12 + SYS_RT_SIGACTION = 13 + SYS_RT_SIGPROCMASK = 14 + SYS_RT_SIGRETURN = 15 + SYS_IOCTL = 16 + SYS_PREAD64 = 17 + SYS_PWRITE64 = 18 + SYS_READV = 19 + SYS_WRITEV = 20 + SYS_ACCESS = 21 + SYS_PIPE = 22 + SYS_SELECT = 23 + SYS_SCHED_YIELD = 24 + SYS_MREMAP = 25 + SYS_MSYNC = 26 + SYS_MINCORE = 27 + SYS_MADVISE = 28 + SYS_SHMGET = 29 + SYS_SHMAT = 30 + SYS_SHMCTL = 31 + SYS_DUP = 32 + SYS_DUP2 = 33 + SYS_PAUSE = 34 + SYS_NANOSLEEP = 35 + SYS_GETITIMER = 36 + SYS_ALARM = 37 + SYS_SETITIMER = 38 + SYS_GETPID = 39 + SYS_SENDFILE = 40 + SYS_SOCKET = 41 + SYS_CONNECT = 42 + SYS_ACCEPT = 43 + SYS_SENDTO = 44 + SYS_RECVFROM = 45 + SYS_SENDMSG = 46 + SYS_RECVMSG = 47 + SYS_SHUTDOWN = 48 + SYS_BIND = 49 + SYS_LISTEN = 50 + SYS_GETSOCKNAME = 51 + SYS_GETPEERNAME = 52 + SYS_SOCKETPAIR = 53 + SYS_SETSOCKOPT = 54 + SYS_GETSOCKOPT = 55 + SYS_CLONE = 56 + SYS_FORK = 57 + SYS_VFORK = 58 + SYS_EXECVE = 59 + SYS_EXIT = 60 + SYS_WAIT4 = 61 + SYS_KILL = 62 + SYS_UNAME = 63 + SYS_SEMGET = 64 + SYS_SEMOP = 65 + SYS_SEMCTL = 66 + SYS_SHMDT = 67 + SYS_MSGGET = 68 + SYS_MSGSND = 69 + SYS_MSGRCV = 70 + SYS_MSGCTL = 71 + SYS_FCNTL = 72 + SYS_FLOCK = 73 + SYS_FSYNC = 74 + SYS_FDATASYNC = 75 + SYS_TRUNCATE = 76 + SYS_FTRUNCATE = 77 + SYS_GETDENTS = 78 + SYS_GETCWD = 79 + SYS_CHDIR = 80 + SYS_FCHDIR = 81 + SYS_RENAME = 82 + SYS_MKDIR = 83 + SYS_RMDIR = 84 + SYS_CREAT = 85 + SYS_LINK = 86 + 
SYS_UNLINK = 87 + SYS_SYMLINK = 88 + SYS_READLINK = 89 + SYS_CHMOD = 90 + SYS_FCHMOD = 91 + SYS_CHOWN = 92 + SYS_FCHOWN = 93 + SYS_LCHOWN = 94 + SYS_UMASK = 95 + SYS_GETTIMEOFDAY = 96 + SYS_GETRLIMIT = 97 + SYS_GETRUSAGE = 98 + SYS_SYSINFO = 99 + SYS_TIMES = 100 + SYS_PTRACE = 101 + SYS_GETUID = 102 + SYS_SYSLOG = 103 + SYS_GETGID = 104 + SYS_SETUID = 105 + SYS_SETGID = 106 + SYS_GETEUID = 107 + SYS_GETEGID = 108 + SYS_SETPGID = 109 + SYS_GETPPID = 110 + SYS_GETPGRP = 111 + SYS_SETSID = 112 + SYS_SETREUID = 113 + SYS_SETREGID = 114 + SYS_GETGROUPS = 115 + SYS_SETGROUPS = 116 + SYS_SETRESUID = 117 + SYS_GETRESUID = 118 + SYS_SETRESGID = 119 + SYS_GETRESGID = 120 + SYS_GETPGID = 121 + SYS_SETFSUID = 122 + SYS_SETFSGID = 123 + SYS_GETSID = 124 + SYS_CAPGET = 125 + SYS_CAPSET = 126 + SYS_RT_SIGPENDING = 127 + SYS_RT_SIGTIMEDWAIT = 128 + SYS_RT_SIGQUEUEINFO = 129 + SYS_RT_SIGSUSPEND = 130 + SYS_SIGALTSTACK = 131 + SYS_UTIME = 132 + SYS_MKNOD = 133 + SYS_USELIB = 134 + SYS_PERSONALITY = 135 + SYS_USTAT = 136 + SYS_STATFS = 137 + SYS_FSTATFS = 138 + SYS_SYSFS = 139 + SYS_GETPRIORITY = 140 + SYS_SETPRIORITY = 141 + SYS_SCHED_SETPARAM = 142 + SYS_SCHED_GETPARAM = 143 + SYS_SCHED_SETSCHEDULER = 144 + SYS_SCHED_GETSCHEDULER = 145 + SYS_SCHED_GET_PRIORITY_MAX = 146 + SYS_SCHED_GET_PRIORITY_MIN = 147 + SYS_SCHED_RR_GET_INTERVAL = 148 + SYS_MLOCK = 149 + SYS_MUNLOCK = 150 + SYS_MLOCKALL = 151 + SYS_MUNLOCKALL = 152 + SYS_VHANGUP = 153 + SYS_MODIFY_LDT = 154 + SYS_PIVOT_ROOT = 155 + SYS__SYSCTL = 156 + SYS_PRCTL = 157 + SYS_ARCH_PRCTL = 158 + SYS_ADJTIMEX = 159 + SYS_SETRLIMIT = 160 + SYS_CHROOT = 161 + SYS_SYNC = 162 + SYS_ACCT = 163 + SYS_SETTIMEOFDAY = 164 + SYS_MOUNT = 165 + SYS_UMOUNT2 = 166 + SYS_SWAPON = 167 + SYS_SWAPOFF = 168 + SYS_REBOOT = 169 + SYS_SETHOSTNAME = 170 + SYS_SETDOMAINNAME = 171 + SYS_IOPL = 172 + SYS_IOPERM = 173 + SYS_CREATE_MODULE = 174 + SYS_INIT_MODULE = 175 + SYS_DELETE_MODULE = 176 + SYS_GET_KERNEL_SYMS = 177 + SYS_QUERY_MODULE = 178 + SYS_QUOTACTL = 179 + SYS_NFSSERVCTL = 180 + SYS_GETPMSG = 181 + SYS_PUTPMSG = 182 + SYS_AFS_SYSCALL = 183 + SYS_TUXCALL = 184 + SYS_SECURITY = 185 + SYS_GETTID = 186 + SYS_READAHEAD = 187 + SYS_SETXATTR = 188 + SYS_LSETXATTR = 189 + SYS_FSETXATTR = 190 + SYS_GETXATTR = 191 + SYS_LGETXATTR = 192 + SYS_FGETXATTR = 193 + SYS_LISTXATTR = 194 + SYS_LLISTXATTR = 195 + SYS_FLISTXATTR = 196 + SYS_REMOVEXATTR = 197 + SYS_LREMOVEXATTR = 198 + SYS_FREMOVEXATTR = 199 + SYS_TKILL = 200 + SYS_TIME = 201 + SYS_FUTEX = 202 + SYS_SCHED_SETAFFINITY = 203 + SYS_SCHED_GETAFFINITY = 204 + SYS_SET_THREAD_AREA = 205 + SYS_IO_SETUP = 206 + SYS_IO_DESTROY = 207 + SYS_IO_GETEVENTS = 208 + SYS_IO_SUBMIT = 209 + SYS_IO_CANCEL = 210 + SYS_GET_THREAD_AREA = 211 + SYS_LOOKUP_DCOOKIE = 212 + SYS_EPOLL_CREATE = 213 + SYS_EPOLL_CTL_OLD = 214 + SYS_EPOLL_WAIT_OLD = 215 + SYS_REMAP_FILE_PAGES = 216 + SYS_GETDENTS64 = 217 + SYS_SET_TID_ADDRESS = 218 + SYS_RESTART_SYSCALL = 219 + SYS_SEMTIMEDOP = 220 + SYS_FADVISE64 = 221 + SYS_TIMER_CREATE = 222 + SYS_TIMER_SETTIME = 223 + SYS_TIMER_GETTIME = 224 + SYS_TIMER_GETOVERRUN = 225 + SYS_TIMER_DELETE = 226 + SYS_CLOCK_SETTIME = 227 + SYS_CLOCK_GETTIME = 228 + SYS_CLOCK_GETRES = 229 + SYS_CLOCK_NANOSLEEP = 230 + SYS_EXIT_GROUP = 231 + SYS_EPOLL_WAIT = 232 + SYS_EPOLL_CTL = 233 + SYS_TGKILL = 234 + SYS_UTIMES = 235 + SYS_VSERVER = 236 + SYS_MBIND = 237 + SYS_SET_MEMPOLICY = 238 + SYS_GET_MEMPOLICY = 239 + SYS_MQ_OPEN = 240 + SYS_MQ_UNLINK = 241 + SYS_MQ_TIMEDSEND = 242 + SYS_MQ_TIMEDRECEIVE = 243 + SYS_MQ_NOTIFY = 244 + SYS_MQ_GETSETATTR 
= 245 + SYS_KEXEC_LOAD = 246 + SYS_WAITID = 247 + SYS_ADD_KEY = 248 + SYS_REQUEST_KEY = 249 + SYS_KEYCTL = 250 + SYS_IOPRIO_SET = 251 + SYS_IOPRIO_GET = 252 + SYS_INOTIFY_INIT = 253 + SYS_INOTIFY_ADD_WATCH = 254 + SYS_INOTIFY_RM_WATCH = 255 + SYS_MIGRATE_PAGES = 256 + SYS_OPENAT = 257 + SYS_MKDIRAT = 258 + SYS_MKNODAT = 259 + SYS_FCHOWNAT = 260 + SYS_FUTIMESAT = 261 + SYS_NEWFSTATAT = 262 + SYS_UNLINKAT = 263 + SYS_RENAMEAT = 264 + SYS_LINKAT = 265 + SYS_SYMLINKAT = 266 + SYS_READLINKAT = 267 + SYS_FCHMODAT = 268 + SYS_FACCESSAT = 269 + SYS_PSELECT6 = 270 + SYS_PPOLL = 271 + SYS_UNSHARE = 272 + SYS_SET_ROBUST_LIST = 273 + SYS_GET_ROBUST_LIST = 274 + SYS_SPLICE = 275 + SYS_TEE = 276 + SYS_SYNC_FILE_RANGE = 277 + SYS_VMSPLICE = 278 + SYS_MOVE_PAGES = 279 + SYS_UTIMENSAT = 280 + SYS_EPOLL_PWAIT = 281 + SYS_SIGNALFD = 282 + SYS_TIMERFD_CREATE = 283 + SYS_EVENTFD = 284 + SYS_FALLOCATE = 285 + SYS_TIMERFD_SETTIME = 286 + SYS_TIMERFD_GETTIME = 287 + SYS_ACCEPT4 = 288 + SYS_SIGNALFD4 = 289 + SYS_EVENTFD2 = 290 + SYS_EPOLL_CREATE1 = 291 + SYS_DUP3 = 292 + SYS_PIPE2 = 293 + SYS_INOTIFY_INIT1 = 294 + SYS_PREADV = 295 + SYS_PWRITEV = 296 + SYS_RT_TGSIGQUEUEINFO = 297 + SYS_PERF_EVENT_OPEN = 298 + SYS_RECVMMSG = 299 + SYS_FANOTIFY_INIT = 300 + SYS_FANOTIFY_MARK = 301 + SYS_PRLIMIT64 = 302 + SYS_NAME_TO_HANDLE_AT = 303 + SYS_OPEN_BY_HANDLE_AT = 304 + SYS_CLOCK_ADJTIME = 305 + SYS_SYNCFS = 306 + SYS_SENDMMSG = 307 + SYS_SETNS = 308 + SYS_GETCPU = 309 + SYS_PROCESS_VM_READV = 310 + SYS_PROCESS_VM_WRITEV = 311 + SYS_KCMP = 312 + SYS_FINIT_MODULE = 313 + SYS_SCHED_SETATTR = 314 + SYS_SCHED_GETATTR = 315 + SYS_RENAMEAT2 = 316 + SYS_SECCOMP = 317 + SYS_GETRANDOM = 318 + SYS_MEMFD_CREATE = 319 + SYS_KEXEC_FILE_LOAD = 320 + SYS_BPF = 321 + SYS_EXECVEAT = 322 + SYS_USERFAULTFD = 323 + SYS_MEMBARRIER = 324 + SYS_MLOCK2 = 325 + SYS_COPY_FILE_RANGE = 326 + SYS_PREADV2 = 327 + SYS_PWRITEV2 = 328 + SYS_PKEY_MPROTECT = 329 + SYS_PKEY_ALLOC = 330 + SYS_PKEY_FREE = 331 + SYS_STATX = 332 + SYS_IO_PGETEVENTS = 333 + SYS_RSEQ = 334 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3b1c10513..d75f65a0a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -403,4 +403,8 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 3198adcf7..8b02f09e9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -7,303 +7,308 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 
5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_RENAMEAT = 38 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 
169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + 
SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_RENAMEAT = 38 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + 
SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index c877ec6e6..026695abb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -424,4 +424,8 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR = 4442 + SYS_QUOTACTL_FD = 4443 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index b5f290372..7320ba958 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -7,351 +7,355 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - 
SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 - SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - 
SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - 
SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR = 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + 
SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + 
SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_QUOTACTL_FD = 5443 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 46077689a..45082dd67 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -7,351 +7,355 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 - SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 
5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 
5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR = 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + 
SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + 
SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_QUOTACTL_FD = 5443 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 
5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 80e6696b3..570a857a5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -424,4 +424,8 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR = 4442 + SYS_QUOTACTL_FD = 4443 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index b9d697ffb..638498d62 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -431,4 +431,8 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 08edc54d3..702beebfe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -7,400 +7,404 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 
119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - SYS_SET_TID_ADDRESS = 232 - SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - 
SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + 
SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + 
SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + 
SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 33b33b083..bfc87ea44 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -7,400 +7,404 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - 
SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - SYS_SET_TID_ADDRESS = 232 - SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - 
SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + 
SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + 
SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + 
SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 66c8a8e09..a390e147d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -7,302 +7,306 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - 
SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - 
SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + 
SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index aea5760ce..3e791e6cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -7,365 +7,369 @@ package unix const ( - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - 
SYS_CLOSE = 6 - SYS_RESTART_SYSCALL = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SIGNAL = 48 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_LOOKUP_DCOOKIE = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_GETDENTS = 141 - SYS_SELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_GETRLIMIT = 191 - SYS_LCHOWN = 198 - SYS_GETUID = 199 - SYS_GETGID = 200 - SYS_GETEUID = 201 - SYS_GETEGID = 202 - SYS_SETREUID = 203 - SYS_SETREGID = 204 - SYS_GETGROUPS = 205 - SYS_SETGROUPS = 206 - SYS_FCHOWN = 207 - SYS_SETRESUID = 208 - SYS_GETRESUID = 209 - SYS_SETRESGID = 210 - SYS_GETRESGID = 211 - SYS_CHOWN = 212 - SYS_SETUID = 213 - SYS_SETGID = 214 - SYS_SETFSUID = 215 - SYS_SETFSGID = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_READAHEAD = 222 - SYS_SETXATTR = 224 - SYS_LSETXATTR = 225 - SYS_FSETXATTR = 226 - SYS_GETXATTR = 227 - SYS_LGETXATTR = 228 - SYS_FGETXATTR = 229 - SYS_LISTXATTR = 230 - 
SYS_LLISTXATTR = 231 - SYS_FLISTXATTR = 232 - SYS_REMOVEXATTR = 233 - SYS_LREMOVEXATTR = 234 - SYS_FREMOVEXATTR = 235 - SYS_GETTID = 236 - SYS_TKILL = 237 - SYS_FUTEX = 238 - SYS_SCHED_SETAFFINITY = 239 - SYS_SCHED_GETAFFINITY = 240 - SYS_TGKILL = 241 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_EPOLL_CREATE = 249 - SYS_EPOLL_CTL = 250 - SYS_EPOLL_WAIT = 251 - SYS_SET_TID_ADDRESS = 252 - SYS_FADVISE64 = 253 - SYS_TIMER_CREATE = 254 - SYS_TIMER_SETTIME = 255 - SYS_TIMER_GETTIME = 256 - SYS_TIMER_GETOVERRUN = 257 - SYS_TIMER_DELETE = 258 - SYS_CLOCK_SETTIME = 259 - SYS_CLOCK_GETTIME = 260 - SYS_CLOCK_GETRES = 261 - SYS_CLOCK_NANOSLEEP = 262 - SYS_STATFS64 = 265 - SYS_FSTATFS64 = 266 - SYS_REMAP_FILE_PAGES = 267 - SYS_MBIND = 268 - SYS_GET_MEMPOLICY = 269 - SYS_SET_MEMPOLICY = 270 - SYS_MQ_OPEN = 271 - SYS_MQ_UNLINK = 272 - SYS_MQ_TIMEDSEND = 273 - SYS_MQ_TIMEDRECEIVE = 274 - SYS_MQ_NOTIFY = 275 - SYS_MQ_GETSETATTR = 276 - SYS_KEXEC_LOAD = 277 - SYS_ADD_KEY = 278 - SYS_REQUEST_KEY = 279 - SYS_KEYCTL = 280 - SYS_WAITID = 281 - SYS_IOPRIO_SET = 282 - SYS_IOPRIO_GET = 283 - SYS_INOTIFY_INIT = 284 - SYS_INOTIFY_ADD_WATCH = 285 - SYS_INOTIFY_RM_WATCH = 286 - SYS_MIGRATE_PAGES = 287 - SYS_OPENAT = 288 - SYS_MKDIRAT = 289 - SYS_MKNODAT = 290 - SYS_FCHOWNAT = 291 - SYS_FUTIMESAT = 292 - SYS_NEWFSTATAT = 293 - SYS_UNLINKAT = 294 - SYS_RENAMEAT = 295 - SYS_LINKAT = 296 - SYS_SYMLINKAT = 297 - SYS_READLINKAT = 298 - SYS_FCHMODAT = 299 - SYS_FACCESSAT = 300 - SYS_PSELECT6 = 301 - SYS_PPOLL = 302 - SYS_UNSHARE = 303 - SYS_SET_ROBUST_LIST = 304 - SYS_GET_ROBUST_LIST = 305 - SYS_SPLICE = 306 - SYS_SYNC_FILE_RANGE = 307 - SYS_TEE = 308 - SYS_VMSPLICE = 309 - SYS_MOVE_PAGES = 310 - SYS_GETCPU = 311 - SYS_EPOLL_PWAIT = 312 - SYS_UTIMES = 313 - SYS_FALLOCATE = 314 - SYS_UTIMENSAT = 315 - SYS_SIGNALFD = 316 - SYS_TIMERFD = 317 - SYS_EVENTFD = 318 - SYS_TIMERFD_CREATE = 319 - SYS_TIMERFD_SETTIME = 320 - SYS_TIMERFD_GETTIME = 321 - SYS_SIGNALFD4 = 322 - SYS_EVENTFD2 = 323 - SYS_INOTIFY_INIT1 = 324 - SYS_PIPE2 = 325 - SYS_DUP3 = 326 - SYS_EPOLL_CREATE1 = 327 - SYS_PREADV = 328 - SYS_PWRITEV = 329 - SYS_RT_TGSIGQUEUEINFO = 330 - SYS_PERF_EVENT_OPEN = 331 - SYS_FANOTIFY_INIT = 332 - SYS_FANOTIFY_MARK = 333 - SYS_PRLIMIT64 = 334 - SYS_NAME_TO_HANDLE_AT = 335 - SYS_OPEN_BY_HANDLE_AT = 336 - SYS_CLOCK_ADJTIME = 337 - SYS_SYNCFS = 338 - SYS_SETNS = 339 - SYS_PROCESS_VM_READV = 340 - SYS_PROCESS_VM_WRITEV = 341 - SYS_S390_RUNTIME_INSTR = 342 - SYS_KCMP = 343 - SYS_FINIT_MODULE = 344 - SYS_SCHED_SETATTR = 345 - SYS_SCHED_GETATTR = 346 - SYS_RENAMEAT2 = 347 - SYS_SECCOMP = 348 - SYS_GETRANDOM = 349 - SYS_MEMFD_CREATE = 350 - SYS_BPF = 351 - SYS_S390_PCI_MMIO_WRITE = 352 - SYS_S390_PCI_MMIO_READ = 353 - SYS_EXECVEAT = 354 - SYS_USERFAULTFD = 355 - SYS_MEMBARRIER = 356 - SYS_RECVMMSG = 357 - SYS_SENDMMSG = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME = 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_MLOCK2 = 374 - SYS_COPY_FILE_RANGE = 375 - SYS_PREADV2 = 376 - SYS_PWRITEV2 = 377 - SYS_S390_GUARDED_STORAGE = 378 - SYS_STATX = 379 - SYS_S390_STHYI = 380 - SYS_KEXEC_FILE_LOAD = 381 - SYS_IO_PGETEVENTS = 382 - SYS_RSEQ = 383 - SYS_PKEY_MPROTECT = 384 - SYS_PKEY_ALLOC = 385 - SYS_PKEY_FREE = 386 - 
SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_RESTART_SYSCALL = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SIGNAL = 48 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_LOOKUP_DCOOKIE = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_GETDENTS = 141 + SYS_SELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 
187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_GETRLIMIT = 191 + SYS_LCHOWN = 198 + SYS_GETUID = 199 + SYS_GETGID = 200 + SYS_GETEUID = 201 + SYS_GETEGID = 202 + SYS_SETREUID = 203 + SYS_SETREGID = 204 + SYS_GETGROUPS = 205 + SYS_SETGROUPS = 206 + SYS_FCHOWN = 207 + SYS_SETRESUID = 208 + SYS_GETRESUID = 209 + SYS_SETRESGID = 210 + SYS_GETRESGID = 211 + SYS_CHOWN = 212 + SYS_SETUID = 213 + SYS_SETGID = 214 + SYS_SETFSUID = 215 + SYS_SETFSGID = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_READAHEAD = 222 + SYS_SETXATTR = 224 + SYS_LSETXATTR = 225 + SYS_FSETXATTR = 226 + SYS_GETXATTR = 227 + SYS_LGETXATTR = 228 + SYS_FGETXATTR = 229 + SYS_LISTXATTR = 230 + SYS_LLISTXATTR = 231 + SYS_FLISTXATTR = 232 + SYS_REMOVEXATTR = 233 + SYS_LREMOVEXATTR = 234 + SYS_FREMOVEXATTR = 235 + SYS_GETTID = 236 + SYS_TKILL = 237 + SYS_FUTEX = 238 + SYS_SCHED_SETAFFINITY = 239 + SYS_SCHED_GETAFFINITY = 240 + SYS_TGKILL = 241 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_EPOLL_CREATE = 249 + SYS_EPOLL_CTL = 250 + SYS_EPOLL_WAIT = 251 + SYS_SET_TID_ADDRESS = 252 + SYS_FADVISE64 = 253 + SYS_TIMER_CREATE = 254 + SYS_TIMER_SETTIME = 255 + SYS_TIMER_GETTIME = 256 + SYS_TIMER_GETOVERRUN = 257 + SYS_TIMER_DELETE = 258 + SYS_CLOCK_SETTIME = 259 + SYS_CLOCK_GETTIME = 260 + SYS_CLOCK_GETRES = 261 + SYS_CLOCK_NANOSLEEP = 262 + SYS_STATFS64 = 265 + SYS_FSTATFS64 = 266 + SYS_REMAP_FILE_PAGES = 267 + SYS_MBIND = 268 + SYS_GET_MEMPOLICY = 269 + SYS_SET_MEMPOLICY = 270 + SYS_MQ_OPEN = 271 + SYS_MQ_UNLINK = 272 + SYS_MQ_TIMEDSEND = 273 + SYS_MQ_TIMEDRECEIVE = 274 + SYS_MQ_NOTIFY = 275 + SYS_MQ_GETSETATTR = 276 + SYS_KEXEC_LOAD = 277 + SYS_ADD_KEY = 278 + SYS_REQUEST_KEY = 279 + SYS_KEYCTL = 280 + SYS_WAITID = 281 + SYS_IOPRIO_SET = 282 + SYS_IOPRIO_GET = 283 + SYS_INOTIFY_INIT = 284 + SYS_INOTIFY_ADD_WATCH = 285 + SYS_INOTIFY_RM_WATCH = 286 + SYS_MIGRATE_PAGES = 287 + SYS_OPENAT = 288 + SYS_MKDIRAT = 289 + SYS_MKNODAT = 290 + SYS_FCHOWNAT = 291 + SYS_FUTIMESAT = 292 + SYS_NEWFSTATAT = 293 + SYS_UNLINKAT = 294 + SYS_RENAMEAT = 295 + SYS_LINKAT = 296 + SYS_SYMLINKAT = 297 + SYS_READLINKAT = 298 + SYS_FCHMODAT = 299 + SYS_FACCESSAT = 300 + SYS_PSELECT6 = 301 + SYS_PPOLL = 302 + SYS_UNSHARE = 303 + SYS_SET_ROBUST_LIST = 304 + SYS_GET_ROBUST_LIST = 305 + SYS_SPLICE = 306 + SYS_SYNC_FILE_RANGE = 307 + SYS_TEE = 308 + SYS_VMSPLICE = 309 + SYS_MOVE_PAGES = 310 + SYS_GETCPU = 311 + SYS_EPOLL_PWAIT = 312 + SYS_UTIMES = 313 + SYS_FALLOCATE = 314 + SYS_UTIMENSAT = 315 + SYS_SIGNALFD = 316 + SYS_TIMERFD = 317 + SYS_EVENTFD = 318 + SYS_TIMERFD_CREATE = 319 + SYS_TIMERFD_SETTIME = 320 + SYS_TIMERFD_GETTIME = 321 + SYS_SIGNALFD4 = 322 + SYS_EVENTFD2 = 323 + SYS_INOTIFY_INIT1 = 324 + SYS_PIPE2 = 325 + SYS_DUP3 = 326 + SYS_EPOLL_CREATE1 = 327 + SYS_PREADV = 328 + SYS_PWRITEV = 329 + SYS_RT_TGSIGQUEUEINFO = 330 + SYS_PERF_EVENT_OPEN = 331 + SYS_FANOTIFY_INIT = 332 + SYS_FANOTIFY_MARK = 333 + SYS_PRLIMIT64 = 334 + SYS_NAME_TO_HANDLE_AT = 335 + SYS_OPEN_BY_HANDLE_AT = 336 + SYS_CLOCK_ADJTIME = 337 + SYS_SYNCFS = 338 + SYS_SETNS = 339 + SYS_PROCESS_VM_READV = 340 + SYS_PROCESS_VM_WRITEV = 341 + SYS_S390_RUNTIME_INSTR = 342 + SYS_KCMP = 343 + SYS_FINIT_MODULE = 344 + SYS_SCHED_SETATTR = 345 + SYS_SCHED_GETATTR = 346 + SYS_RENAMEAT2 = 347 + SYS_SECCOMP = 348 + SYS_GETRANDOM = 349 + SYS_MEMFD_CREATE = 350 + SYS_BPF = 351 + SYS_S390_PCI_MMIO_WRITE = 352 + SYS_S390_PCI_MMIO_READ = 353 + 
SYS_EXECVEAT = 354 + SYS_USERFAULTFD = 355 + SYS_MEMBARRIER = 356 + SYS_RECVMMSG = 357 + SYS_SENDMMSG = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_MLOCK2 = 374 + SYS_COPY_FILE_RANGE = 375 + SYS_PREADV2 = 376 + SYS_PWRITEV2 = 377 + SYS_S390_GUARDED_STORAGE = 378 + SYS_STATX = 379 + SYS_S390_STHYI = 380 + SYS_KEXEC_FILE_LOAD = 381 + SYS_IO_PGETEVENTS = 382 + SYS_RSEQ = 383 + SYS_PKEY_MPROTECT = 384 + SYS_PKEY_ALLOC = 385 + SYS_PKEY_FREE = 386 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 488ca848d..78802a5cf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -7,379 +7,383 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECV = 11 - SYS_CHDIR = 12 - SYS_CHOWN = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BRK = 17 - SYS_PERFCTR = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_CAPGET = 21 - SYS_CAPSET = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_VMSPLICE = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_SIGALTSTACK = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_STAT = 38 - SYS_SENDFILE = 39 - SYS_LSTAT = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_UMOUNT2 = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_MEMORY_ORDERING = 52 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_FSTAT = 62 - SYS_FSTAT64 = 63 - SYS_GETPAGESIZE = 64 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_MMAP = 71 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_VHANGUP = 76 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_SETHOSTNAME = 88 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_ACCEPT = 99 - SYS_GETPRIORITY = 100 - SYS_RT_SIGRETURN = 101 - SYS_RT_SIGACTION = 102 - SYS_RT_SIGPROCMASK = 103 - SYS_RT_SIGPENDING = 104 - SYS_RT_SIGTIMEDWAIT = 105 - SYS_RT_SIGQUEUEINFO = 106 - SYS_RT_SIGSUSPEND 
= 107 - SYS_SETRESUID = 108 - SYS_GETRESUID = 109 - SYS_SETRESGID = 110 - SYS_GETRESGID = 111 - SYS_RECVMSG = 113 - SYS_SENDMSG = 114 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_GETCWD = 119 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_RECVFROM = 125 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_TRUNCATE = 129 - SYS_FTRUNCATE = 130 - SYS_FLOCK = 131 - SYS_LSTAT64 = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_STAT64 = 139 - SYS_SENDFILE64 = 140 - SYS_GETPEERNAME = 141 - SYS_FUTEX = 142 - SYS_GETTID = 143 - SYS_GETRLIMIT = 144 - SYS_SETRLIMIT = 145 - SYS_PIVOT_ROOT = 146 - SYS_PRCTL = 147 - SYS_PCICONFIG_READ = 148 - SYS_PCICONFIG_WRITE = 149 - SYS_GETSOCKNAME = 150 - SYS_INOTIFY_INIT = 151 - SYS_INOTIFY_ADD_WATCH = 152 - SYS_POLL = 153 - SYS_GETDENTS64 = 154 - SYS_INOTIFY_RM_WATCH = 156 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UMOUNT = 159 - SYS_SCHED_SET_AFFINITY = 160 - SYS_SCHED_GET_AFFINITY = 161 - SYS_GETDOMAINNAME = 162 - SYS_SETDOMAINNAME = 163 - SYS_UTRAP_INSTALL = 164 - SYS_QUOTACTL = 165 - SYS_SET_TID_ADDRESS = 166 - SYS_MOUNT = 167 - SYS_USTAT = 168 - SYS_SETXATTR = 169 - SYS_LSETXATTR = 170 - SYS_FSETXATTR = 171 - SYS_GETXATTR = 172 - SYS_LGETXATTR = 173 - SYS_GETDENTS = 174 - SYS_SETSID = 175 - SYS_FCHDIR = 176 - SYS_FGETXATTR = 177 - SYS_LISTXATTR = 178 - SYS_LLISTXATTR = 179 - SYS_FLISTXATTR = 180 - SYS_REMOVEXATTR = 181 - SYS_LREMOVEXATTR = 182 - SYS_SIGPENDING = 183 - SYS_QUERY_MODULE = 184 - SYS_SETPGID = 185 - SYS_FREMOVEXATTR = 186 - SYS_TKILL = 187 - SYS_EXIT_GROUP = 188 - SYS_UNAME = 189 - SYS_INIT_MODULE = 190 - SYS_PERSONALITY = 191 - SYS_REMAP_FILE_PAGES = 192 - SYS_EPOLL_CREATE = 193 - SYS_EPOLL_CTL = 194 - SYS_EPOLL_WAIT = 195 - SYS_IOPRIO_SET = 196 - SYS_GETPPID = 197 - SYS_SIGACTION = 198 - SYS_SGETMASK = 199 - SYS_SSETMASK = 200 - SYS_SIGSUSPEND = 201 - SYS_OLDLSTAT = 202 - SYS_USELIB = 203 - SYS_READDIR = 204 - SYS_READAHEAD = 205 - SYS_SOCKETCALL = 206 - SYS_SYSLOG = 207 - SYS_LOOKUP_DCOOKIE = 208 - SYS_FADVISE64 = 209 - SYS_FADVISE64_64 = 210 - SYS_TGKILL = 211 - SYS_WAITPID = 212 - SYS_SWAPOFF = 213 - SYS_SYSINFO = 214 - SYS_IPC = 215 - SYS_SIGRETURN = 216 - SYS_CLONE = 217 - SYS_IOPRIO_GET = 218 - SYS_ADJTIMEX = 219 - SYS_SIGPROCMASK = 220 - SYS_CREATE_MODULE = 221 - SYS_DELETE_MODULE = 222 - SYS_GET_KERNEL_SYMS = 223 - SYS_GETPGID = 224 - SYS_BDFLUSH = 225 - SYS_SYSFS = 226 - SYS_AFS_SYSCALL = 227 - SYS_SETFSUID = 228 - SYS_SETFSGID = 229 - SYS__NEWSELECT = 230 - SYS_SPLICE = 232 - SYS_STIME = 233 - SYS_STATFS64 = 234 - SYS_FSTATFS64 = 235 - SYS__LLSEEK = 236 - SYS_MLOCK = 237 - SYS_MUNLOCK = 238 - SYS_MLOCKALL = 239 - SYS_MUNLOCKALL = 240 - SYS_SCHED_SETPARAM = 241 - SYS_SCHED_GETPARAM = 242 - SYS_SCHED_SETSCHEDULER = 243 - SYS_SCHED_GETSCHEDULER = 244 - SYS_SCHED_YIELD = 245 - SYS_SCHED_GET_PRIORITY_MAX = 246 - SYS_SCHED_GET_PRIORITY_MIN = 247 - SYS_SCHED_RR_GET_INTERVAL = 248 - SYS_NANOSLEEP = 249 - SYS_MREMAP = 250 - SYS__SYSCTL = 251 - SYS_GETSID = 252 - SYS_FDATASYNC = 253 - SYS_NFSSERVCTL = 254 - SYS_SYNC_FILE_RANGE = 255 - SYS_CLOCK_SETTIME = 256 - SYS_CLOCK_GETTIME = 257 - SYS_CLOCK_GETRES = 258 - SYS_CLOCK_NANOSLEEP = 259 - SYS_SCHED_GETAFFINITY = 260 - SYS_SCHED_SETAFFINITY = 261 - SYS_TIMER_SETTIME = 262 - SYS_TIMER_GETTIME = 263 - SYS_TIMER_GETOVERRUN = 264 - SYS_TIMER_DELETE = 265 - SYS_TIMER_CREATE = 266 - SYS_VSERVER = 267 - SYS_IO_SETUP = 
268 - SYS_IO_DESTROY = 269 - SYS_IO_SUBMIT = 270 - SYS_IO_CANCEL = 271 - SYS_IO_GETEVENTS = 272 - SYS_MQ_OPEN = 273 - SYS_MQ_UNLINK = 274 - SYS_MQ_TIMEDSEND = 275 - SYS_MQ_TIMEDRECEIVE = 276 - SYS_MQ_NOTIFY = 277 - SYS_MQ_GETSETATTR = 278 - SYS_WAITID = 279 - SYS_TEE = 280 - SYS_ADD_KEY = 281 - SYS_REQUEST_KEY = 282 - SYS_KEYCTL = 283 - SYS_OPENAT = 284 - SYS_MKDIRAT = 285 - SYS_MKNODAT = 286 - SYS_FCHOWNAT = 287 - SYS_FUTIMESAT = 288 - SYS_FSTATAT64 = 289 - SYS_UNLINKAT = 290 - SYS_RENAMEAT = 291 - SYS_LINKAT = 292 - SYS_SYMLINKAT = 293 - SYS_READLINKAT = 294 - SYS_FCHMODAT = 295 - SYS_FACCESSAT = 296 - SYS_PSELECT6 = 297 - SYS_PPOLL = 298 - SYS_UNSHARE = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_GET_ROBUST_LIST = 301 - SYS_MIGRATE_PAGES = 302 - SYS_MBIND = 303 - SYS_GET_MEMPOLICY = 304 - SYS_SET_MEMPOLICY = 305 - SYS_KEXEC_LOAD = 306 - SYS_MOVE_PAGES = 307 - SYS_GETCPU = 308 - SYS_EPOLL_PWAIT = 309 - SYS_UTIMENSAT = 310 - SYS_SIGNALFD = 311 - SYS_TIMERFD_CREATE = 312 - SYS_EVENTFD = 313 - SYS_FALLOCATE = 314 - SYS_TIMERFD_SETTIME = 315 - SYS_TIMERFD_GETTIME = 316 - SYS_SIGNALFD4 = 317 - SYS_EVENTFD2 = 318 - SYS_EPOLL_CREATE1 = 319 - SYS_DUP3 = 320 - SYS_PIPE2 = 321 - SYS_INOTIFY_INIT1 = 322 - SYS_ACCEPT4 = 323 - SYS_PREADV = 324 - SYS_PWRITEV = 325 - SYS_RT_TGSIGQUEUEINFO = 326 - SYS_PERF_EVENT_OPEN = 327 - SYS_RECVMMSG = 328 - SYS_FANOTIFY_INIT = 329 - SYS_FANOTIFY_MARK = 330 - SYS_PRLIMIT64 = 331 - SYS_NAME_TO_HANDLE_AT = 332 - SYS_OPEN_BY_HANDLE_AT = 333 - SYS_CLOCK_ADJTIME = 334 - SYS_SYNCFS = 335 - SYS_SENDMMSG = 336 - SYS_SETNS = 337 - SYS_PROCESS_VM_READV = 338 - SYS_PROCESS_VM_WRITEV = 339 - SYS_KERN_FEATURES = 340 - SYS_KCMP = 341 - SYS_FINIT_MODULE = 342 - SYS_SCHED_SETATTR = 343 - SYS_SCHED_GETATTR = 344 - SYS_RENAMEAT2 = 345 - SYS_SECCOMP = 346 - SYS_GETRANDOM = 347 - SYS_MEMFD_CREATE = 348 - SYS_BPF = 349 - SYS_EXECVEAT = 350 - SYS_MEMBARRIER = 351 - SYS_USERFAULTFD = 352 - SYS_BIND = 353 - SYS_LISTEN = 354 - SYS_SETSOCKOPT = 355 - SYS_MLOCK2 = 356 - SYS_COPY_FILE_RANGE = 357 - SYS_PREADV2 = 358 - SYS_PWRITEV2 = 359 - SYS_STATX = 360 - SYS_IO_PGETEVENTS = 361 - SYS_PKEY_MPROTECT = 362 - SYS_PKEY_ALLOC = 363 - SYS_PKEY_FREE = 364 - SYS_RSEQ = 365 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECV = 11 + SYS_CHDIR = 12 + SYS_CHOWN = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BRK = 17 + SYS_PERFCTR = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_CAPGET = 21 + SYS_CAPSET = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_VMSPLICE = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_SIGALTSTACK = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_STAT = 38 + SYS_SENDFILE = 39 + SYS_LSTAT = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + 
SYS_UMOUNT2 = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_MEMORY_ORDERING = 52 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_FSTAT = 62 + SYS_FSTAT64 = 63 + SYS_GETPAGESIZE = 64 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_MMAP = 71 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_VHANGUP = 76 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_SETHOSTNAME = 88 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_ACCEPT = 99 + SYS_GETPRIORITY = 100 + SYS_RT_SIGRETURN = 101 + SYS_RT_SIGACTION = 102 + SYS_RT_SIGPROCMASK = 103 + SYS_RT_SIGPENDING = 104 + SYS_RT_SIGTIMEDWAIT = 105 + SYS_RT_SIGQUEUEINFO = 106 + SYS_RT_SIGSUSPEND = 107 + SYS_SETRESUID = 108 + SYS_GETRESUID = 109 + SYS_SETRESGID = 110 + SYS_GETRESGID = 111 + SYS_RECVMSG = 113 + SYS_SENDMSG = 114 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_GETCWD = 119 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_RECVFROM = 125 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_TRUNCATE = 129 + SYS_FTRUNCATE = 130 + SYS_FLOCK = 131 + SYS_LSTAT64 = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_STAT64 = 139 + SYS_SENDFILE64 = 140 + SYS_GETPEERNAME = 141 + SYS_FUTEX = 142 + SYS_GETTID = 143 + SYS_GETRLIMIT = 144 + SYS_SETRLIMIT = 145 + SYS_PIVOT_ROOT = 146 + SYS_PRCTL = 147 + SYS_PCICONFIG_READ = 148 + SYS_PCICONFIG_WRITE = 149 + SYS_GETSOCKNAME = 150 + SYS_INOTIFY_INIT = 151 + SYS_INOTIFY_ADD_WATCH = 152 + SYS_POLL = 153 + SYS_GETDENTS64 = 154 + SYS_INOTIFY_RM_WATCH = 156 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UMOUNT = 159 + SYS_SCHED_SET_AFFINITY = 160 + SYS_SCHED_GET_AFFINITY = 161 + SYS_GETDOMAINNAME = 162 + SYS_SETDOMAINNAME = 163 + SYS_UTRAP_INSTALL = 164 + SYS_QUOTACTL = 165 + SYS_SET_TID_ADDRESS = 166 + SYS_MOUNT = 167 + SYS_USTAT = 168 + SYS_SETXATTR = 169 + SYS_LSETXATTR = 170 + SYS_FSETXATTR = 171 + SYS_GETXATTR = 172 + SYS_LGETXATTR = 173 + SYS_GETDENTS = 174 + SYS_SETSID = 175 + SYS_FCHDIR = 176 + SYS_FGETXATTR = 177 + SYS_LISTXATTR = 178 + SYS_LLISTXATTR = 179 + SYS_FLISTXATTR = 180 + SYS_REMOVEXATTR = 181 + SYS_LREMOVEXATTR = 182 + SYS_SIGPENDING = 183 + SYS_QUERY_MODULE = 184 + SYS_SETPGID = 185 + SYS_FREMOVEXATTR = 186 + SYS_TKILL = 187 + SYS_EXIT_GROUP = 188 + SYS_UNAME = 189 + SYS_INIT_MODULE = 190 + SYS_PERSONALITY = 191 + SYS_REMAP_FILE_PAGES = 192 + SYS_EPOLL_CREATE = 193 + SYS_EPOLL_CTL = 194 + SYS_EPOLL_WAIT = 195 + SYS_IOPRIO_SET = 196 + SYS_GETPPID = 197 + SYS_SIGACTION = 198 + SYS_SGETMASK = 199 + SYS_SSETMASK = 200 + SYS_SIGSUSPEND = 201 + SYS_OLDLSTAT = 202 + SYS_USELIB = 203 + SYS_READDIR = 204 + SYS_READAHEAD = 205 + SYS_SOCKETCALL = 206 + SYS_SYSLOG = 207 + SYS_LOOKUP_DCOOKIE = 208 + SYS_FADVISE64 = 209 + SYS_FADVISE64_64 = 210 + SYS_TGKILL = 211 + SYS_WAITPID = 212 + SYS_SWAPOFF = 213 + SYS_SYSINFO = 214 + SYS_IPC = 215 + SYS_SIGRETURN = 216 + SYS_CLONE = 217 + SYS_IOPRIO_GET = 218 + SYS_ADJTIMEX = 219 + SYS_SIGPROCMASK = 220 + SYS_CREATE_MODULE = 221 + SYS_DELETE_MODULE = 222 + SYS_GET_KERNEL_SYMS = 223 + SYS_GETPGID = 224 + SYS_BDFLUSH = 225 + 
SYS_SYSFS = 226 + SYS_AFS_SYSCALL = 227 + SYS_SETFSUID = 228 + SYS_SETFSGID = 229 + SYS__NEWSELECT = 230 + SYS_SPLICE = 232 + SYS_STIME = 233 + SYS_STATFS64 = 234 + SYS_FSTATFS64 = 235 + SYS__LLSEEK = 236 + SYS_MLOCK = 237 + SYS_MUNLOCK = 238 + SYS_MLOCKALL = 239 + SYS_MUNLOCKALL = 240 + SYS_SCHED_SETPARAM = 241 + SYS_SCHED_GETPARAM = 242 + SYS_SCHED_SETSCHEDULER = 243 + SYS_SCHED_GETSCHEDULER = 244 + SYS_SCHED_YIELD = 245 + SYS_SCHED_GET_PRIORITY_MAX = 246 + SYS_SCHED_GET_PRIORITY_MIN = 247 + SYS_SCHED_RR_GET_INTERVAL = 248 + SYS_NANOSLEEP = 249 + SYS_MREMAP = 250 + SYS__SYSCTL = 251 + SYS_GETSID = 252 + SYS_FDATASYNC = 253 + SYS_NFSSERVCTL = 254 + SYS_SYNC_FILE_RANGE = 255 + SYS_CLOCK_SETTIME = 256 + SYS_CLOCK_GETTIME = 257 + SYS_CLOCK_GETRES = 258 + SYS_CLOCK_NANOSLEEP = 259 + SYS_SCHED_GETAFFINITY = 260 + SYS_SCHED_SETAFFINITY = 261 + SYS_TIMER_SETTIME = 262 + SYS_TIMER_GETTIME = 263 + SYS_TIMER_GETOVERRUN = 264 + SYS_TIMER_DELETE = 265 + SYS_TIMER_CREATE = 266 + SYS_VSERVER = 267 + SYS_IO_SETUP = 268 + SYS_IO_DESTROY = 269 + SYS_IO_SUBMIT = 270 + SYS_IO_CANCEL = 271 + SYS_IO_GETEVENTS = 272 + SYS_MQ_OPEN = 273 + SYS_MQ_UNLINK = 274 + SYS_MQ_TIMEDSEND = 275 + SYS_MQ_TIMEDRECEIVE = 276 + SYS_MQ_NOTIFY = 277 + SYS_MQ_GETSETATTR = 278 + SYS_WAITID = 279 + SYS_TEE = 280 + SYS_ADD_KEY = 281 + SYS_REQUEST_KEY = 282 + SYS_KEYCTL = 283 + SYS_OPENAT = 284 + SYS_MKDIRAT = 285 + SYS_MKNODAT = 286 + SYS_FCHOWNAT = 287 + SYS_FUTIMESAT = 288 + SYS_FSTATAT64 = 289 + SYS_UNLINKAT = 290 + SYS_RENAMEAT = 291 + SYS_LINKAT = 292 + SYS_SYMLINKAT = 293 + SYS_READLINKAT = 294 + SYS_FCHMODAT = 295 + SYS_FACCESSAT = 296 + SYS_PSELECT6 = 297 + SYS_PPOLL = 298 + SYS_UNSHARE = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_GET_ROBUST_LIST = 301 + SYS_MIGRATE_PAGES = 302 + SYS_MBIND = 303 + SYS_GET_MEMPOLICY = 304 + SYS_SET_MEMPOLICY = 305 + SYS_KEXEC_LOAD = 306 + SYS_MOVE_PAGES = 307 + SYS_GETCPU = 308 + SYS_EPOLL_PWAIT = 309 + SYS_UTIMENSAT = 310 + SYS_SIGNALFD = 311 + SYS_TIMERFD_CREATE = 312 + SYS_EVENTFD = 313 + SYS_FALLOCATE = 314 + SYS_TIMERFD_SETTIME = 315 + SYS_TIMERFD_GETTIME = 316 + SYS_SIGNALFD4 = 317 + SYS_EVENTFD2 = 318 + SYS_EPOLL_CREATE1 = 319 + SYS_DUP3 = 320 + SYS_PIPE2 = 321 + SYS_INOTIFY_INIT1 = 322 + SYS_ACCEPT4 = 323 + SYS_PREADV = 324 + SYS_PWRITEV = 325 + SYS_RT_TGSIGQUEUEINFO = 326 + SYS_PERF_EVENT_OPEN = 327 + SYS_RECVMMSG = 328 + SYS_FANOTIFY_INIT = 329 + SYS_FANOTIFY_MARK = 330 + SYS_PRLIMIT64 = 331 + SYS_NAME_TO_HANDLE_AT = 332 + SYS_OPEN_BY_HANDLE_AT = 333 + SYS_CLOCK_ADJTIME = 334 + SYS_SYNCFS = 335 + SYS_SENDMMSG = 336 + SYS_SETNS = 337 + SYS_PROCESS_VM_READV = 338 + SYS_PROCESS_VM_WRITEV = 339 + SYS_KERN_FEATURES = 340 + SYS_KCMP = 341 + SYS_FINIT_MODULE = 342 + SYS_SCHED_SETATTR = 343 + SYS_SCHED_GETATTR = 344 + SYS_RENAMEAT2 = 345 + SYS_SECCOMP = 346 + SYS_GETRANDOM = 347 + SYS_MEMFD_CREATE = 348 + SYS_BPF = 349 + SYS_EXECVEAT = 350 + SYS_MEMBARRIER = 351 + SYS_USERFAULTFD = 352 + SYS_BIND = 353 + SYS_LISTEN = 354 + SYS_SETSOCKOPT = 355 + SYS_MLOCK2 = 356 + SYS_COPY_FILE_RANGE = 357 + SYS_PREADV2 = 358 + SYS_PWRITEV2 = 359 + SYS_STATX = 360 + SYS_IO_PGETEVENTS = 361 + SYS_PKEY_MPROTECT = 362 + SYS_PKEY_ALLOC = 363 + SYS_PKEY_FREE = 364 + SYS_RSEQ = 365 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 
428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 72887abe5..4b73bb3b6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -452,6 +452,11 @@ type CanFilter struct { Mask uint32 } +type TCPRepairOpt struct { + Code uint32 + Val uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -484,6 +489,7 @@ const ( SizeofUcred = 0xc SizeofTCPInfo = 0x68 SizeofCanFilter = 0x8 + SizeofTCPRepairOpt = 0x8 ) const ( @@ -681,6 +687,16 @@ type NdMsg struct { Type uint8 } +const ( + ICMP_FILTER = 0x1 + + ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 +) + const ( SizeofSockFilter = 0x8 ) @@ -1001,7 +1017,7 @@ const ( PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 PERF_COUNT_SW_BPF_OUTPUT = 0xa - PERF_COUNT_SW_MAX = 0xb + PERF_COUNT_SW_MAX = 0xc PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 PERF_SAMPLE_TIME = 0x4 @@ -1773,6 +1789,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 @@ -2338,8 +2356,8 @@ const ( SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff + SOF_TIMESTAMPING_LAST = 0x8000 + SOF_TIMESTAMPING_MASK = 0xffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -2915,7 +2933,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x49 + DEVLINK_CMD_MAX = 0x4d DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3138,7 +3156,7 @@ const ( DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2 DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3 DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 0xa4 - DEVLINK_ATTR_MAX = 0xa4 + DEVLINK_ATTR_MAX = 0xa9 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3434,7 +3452,7 @@ const ( ETHTOOL_MSG_CABLE_TEST_ACT = 0x1a ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 0x1b ETHTOOL_MSG_TUNNEL_INFO_GET = 0x1c - ETHTOOL_MSG_USER_MAX = 0x1c + ETHTOOL_MSG_USER_MAX = 0x21 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3465,7 +3483,7 @@ const ( ETHTOOL_MSG_CABLE_TEST_NTF = 0x1b ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 0x1c ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 0x1d - ETHTOOL_MSG_KERNEL_MAX = 0x1d + ETHTOOL_MSG_KERNEL_MAX = 0x22 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 235c62e46..72f2e96f3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -170,6 +170,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff 
--git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 99b1e5b6a..d5f018d13 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index cc8bba791..675446d93 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -176,6 +176,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index fa8fe3a75..711d0711c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index e7fb8d9b7..c1131c741 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2fa61d593..91d5574ff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7f3639933..5d721497b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index f3c20cb86..a5addd06a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 885d27950..bb6b03dfc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -176,6 +176,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a94eb8e18..7637243b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 659e32ebd..a1a28e525 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ab8ec604f..e0a8a1362 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3ec08237f..21d6e56c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 23d474470..0531e98f6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -177,6 +177,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 85effef9c..ad4aad279 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -440,3 +440,43 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type fileObj struct { + Atim Timespec + Mtim Timespec + Ctim Timespec + Pad [3]uint64 + Name *int8 +} + +type portEvent struct { + Events int32 + Source uint16 + Pad uint16 + Object uint64 + User *byte +} + +const ( + PORT_SOURCE_AIO = 0x1 + PORT_SOURCE_TIMER = 0x2 + PORT_SOURCE_USER = 0x3 + PORT_SOURCE_FD = 0x4 + PORT_SOURCE_ALERT = 0x5 + PORT_SOURCE_MQ = 0x6 + PORT_SOURCE_FILE = 0x7 + PORT_ALERT_SET = 0x1 + PORT_ALERT_UPDATE = 0x2 + PORT_ALERT_INVALID = 0x3 + FILE_ACCESS = 0x1 + FILE_MODIFIED = 0x2 + FILE_ATTRIB = 0x4 + FILE_TRUNC = 0x100000 + FILE_NOFOLLOW = 0x10000000 + FILE_DELETE = 0x10 + FILE_RENAME_TO = 0x20 + FILE_RENAME_FROM = 0x40 + UNMOUNTED = 0x20000000 + MOUNTEDOVER = 0x40000000 + FILE_EXCEPTION = 0x60000070 +) diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 111c10d3a..d414ef13b 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -889,6 +889,7 @@ type WTS_SESSION_INFO struct { //sys 
WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken //sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW //sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory +//sys WTSGetActiveConsoleSessionId() (sessionID uint32) type ACL struct { aclRevision byte diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1f733398e..17f03312d 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -680,7 +680,7 @@ const ( WTD_CHOICE_CERT = 5 WTD_STATEACTION_IGNORE = 0x00000000 - WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_VERIFY = 0x00000001 WTD_STATEACTION_CLOSE = 0x00000002 WTD_STATEACTION_AUTO_CACHE = 0x00000003 WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 148de0ffb..2083ec376 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -346,6 +346,7 @@ var ( procVirtualLock = modkernel32.NewProc("VirtualLock") procVirtualProtect = modkernel32.NewProc("VirtualProtect") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -2992,6 +2993,12 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { return } +func WTSGetActiveConsoleSessionId() (sessionID uint32) { + r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + sessionID = uint32(r0) + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go index 2a4ccf801..1f6a38fad 100644 --- a/vendor/golang.org/x/term/term.go +++ b/vendor/golang.org/x/term/term.go @@ -7,11 +7,11 @@ // // Putting a terminal into raw mode is the most common requirement: // -// oldState, err := term.MakeRaw(0) +// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) // if err != nil { // panic(err) // } -// defer term.Restore(0, oldState) +// defer term.Restore(int(os.Stdin.Fd()), oldState) package term // State contains the state of a terminal. diff --git a/vendor/golang.org/x/term/term_solaris.go b/vendor/golang.org/x/term/term_solaris.go deleted file mode 100644 index b9da29744..000000000 --- a/vendor/golang.org/x/term/term_solaris.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import ( - "io" - "syscall" - - "golang.org/x/sys/unix" -) - -// State contains the state of a terminal. 
-type state struct { - termios unix.Termios -} - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} - -func readPassword(fd int) ([]byte, error) { - // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c - val, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - oldState := *val - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) - if err != nil { - return nil, err - } - - defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} - -func makeRaw(fd int) (*State, error) { - // see http://cr.illumos.org/~webrev/andy_js/1060/ - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - - oldState := State{state{termios: *termios}} - - termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON - termios.Oflag &^= unix.OPOST - termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN - termios.Cflag &^= unix.CSIZE | unix.PARENB - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { - return nil, err - } - - return &oldState, nil -} - -func restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) -} - -func getState(fd int) (*State, error) { - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - - return &State{state{termios: *termios}}, nil -} - -func getSize(fd int) (width, height int, err error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go index 6849b6ee5..a4e31ab1b 100644 --- a/vendor/golang.org/x/term/term_unix.go +++ b/vendor/golang.org/x/term/term_unix.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd zos +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/vendor/golang.org/x/term/term_unix_aix.go b/vendor/golang.org/x/term/term_unix_aix.go deleted file mode 100644 index 2d5efd26a..000000000 --- a/vendor/golang.org/x/term/term_unix_aix.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/term/term_unix_linux.go b/vendor/golang.org/x/term/term_unix_linux.go deleted file mode 100644 index 2d5efd26a..000000000 --- a/vendor/golang.org/x/term/term_unix_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/term/term_unix_zos.go b/vendor/golang.org/x/term/term_unix_other.go similarity index 63% rename from vendor/golang.org/x/term/term_unix_zos.go rename to vendor/golang.org/x/term/term_unix_other.go index b85ab8998..1e8955c93 100644 --- a/vendor/golang.org/x/term/term_unix_zos.go +++ b/vendor/golang.org/x/term/term_unix_other.go @@ -1,7 +1,10 @@ -// Copyright 2020 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || linux || solaris || zos +// +build aix linux solaris zos + package term import "golang.org/x/sys/unix" diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index cf72ea990..5fe75b14c 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -9,6 +9,8 @@ import ( "go/ast" "reflect" "sort" + + "golang.org/x/tools/internal/typeparams" ) // An ApplyFunc is invoked by Apply for each node n, even if n is nil, @@ -437,7 +439,13 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. } default: - panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + if ix := typeparams.GetIndexExprData(n); ix != nil { + a.apply(n, "X", nil, ix.X) + // *ast.IndexExpr was handled above, so n must be an *ast.MultiIndexExpr. + a.applyList(n, "Indices") + } else { + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } } if a.post != nil && !a.post(&a.cursor) { diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 000000000..fc8beea5d --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,133 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. 
Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. 
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 000000000..efe221e7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. 
+ return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 000000000..a807d0aaa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). 
+// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + 
p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). + var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
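The chan case above encodes the direction via the "3 - t.Dir()" hack: go/types numbers ChanDir as SendRecv=0, SendOnly=1, RecvOnly=2, while the export format uses the gc constants Crecv=1, Csend=2, Cboth=3. A small round-trip sketch (importDir mirrors chanDir in bimport.go further down; the function names here are illustrative):

package main

import (
	"fmt"
	"go/types"
)

// exportDir mirrors the "3 - t.Dir()" expression in the chan case above.
func exportDir(d types.ChanDir) int { return 3 - int(d) }

// importDir mirrors chanDir on the importer side (Crecv=1, Csend=2, Cboth=3).
func importDir(d int) types.ChanDir {
	switch d {
	case 1:
		return types.RecvOnly
	case 2:
		return types.SendOnly
	case 3:
		return types.SendRecv
	}
	panic("unexpected channel dir")
}

func main() {
	for _, d := range []types.ChanDir{types.SendRecv, types.SendOnly, types.RecvOnly} {
		fmt.Println(int(d), "->", exportDir(d), "->", int(importDir(exportDir(d))))
	}
}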
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // 
float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. 
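A self-contained sketch of the string encoding implemented by p.string and rawInt64 above: every number is a signed varint, a string already seen is written as its non-negative table index, and a new string as the negative of its length followed by its bytes. The type and field names below are illustrative, not part of the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

type strWriter struct {
	index map[string]int
	out   []byte
}

func (w *strWriter) varint(x int64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutVarint(tmp[:], x)
	w.out = append(w.out, tmp[:n]...)
}

func (w *strWriter) string(s string) {
	if i, ok := w.index[s]; ok {
		w.varint(int64(i)) // seen before: non-negative index
		return
	}
	w.index[s] = len(w.index)
	w.varint(-int64(len(s))) // new string: negative length, then the bytes
	w.out = append(w.out, s...)
}

func main() {
	w := &strWriter{index: map[string]int{"": 0}} // empty string is index 0
	w.string("types")
	w.string("types") // repeated: collapses to a one-byte index
	fmt.Printf("% x\n", w.out)
}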
+// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 000000000..e9f73d14a --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
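rawByte's escaping above exists so a literal '$' never appears inside export data and other tools can find its end by scanning for "$$". A minimal sketch of the escape and its inverse (the importer's rawByte later in bimport.go does the same unescaping byte by byte; this version assumes well-formed input):

package main

import (
	"bytes"
	"fmt"
)

func escape(data []byte) []byte {
	var out bytes.Buffer
	for _, b := range data {
		switch b {
		case '$':
			out.WriteString("|S") // '$' => '|' 'S'
		case '|':
			out.WriteString("||") // '|' => '|' '|'
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

func unescape(data []byte) []byte {
	var out bytes.Buffer
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' {
			i++
			if data[i] == 'S' {
				b = '$'
			} else {
				b = '|'
			}
		}
		out.WriteByte(b)
	}
	return out.Bytes()
}

func main() {
	in := []byte("price=$5 | done")
	enc := escape(in)
	fmt.Printf("%q -> %q -> %q\n", in, enc, unescape(enc))
}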
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
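A small sketch of how BImportData above extracts the format version from the leading "version %d" line; anything after the number is ignored, and the pre-Go1.8 "v1" header never reaches this path (it is handled by the 'c'/'d' branch). The inputs in main are illustrative.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseVersion(versionstr string) int {
	version := -1 // unknown
	if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
		if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
			version = v
		}
	}
	return version
}

func main() {
	fmt.Println(parseVersion("version 6"))            // 6
	fmt.Println(parseVersion("version 5 extra info")) // 5
	fmt.Println(parseVersion("v1"))                   // -1 (old header, handled elsewhere)
}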
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
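The fakeFileSet above synthesizes token.Pos values without knowing real line offsets: each file is registered with a fixed budget of fake lines and the line number itself serves as the offset. A runnable sketch of the same trick (filename and line are illustrative):

package main

import (
	"fmt"
	"go/token"
)

func main() {
	const maxlines = 64 * 1024
	fset := token.NewFileSet()
	f := fset.AddFile("fake.go", -1, maxlines)

	// Pretend the file contains only newlines: fake line i+1 starts at offset i.
	lines := make([]int, maxlines)
	for i := range lines {
		lines[i] = i
	}
	f.SetLines(lines)

	pos := f.Pos(41)                // offset 41 == fake line 42, column 1
	fmt.Println(fset.Position(pos)) // fake.go:42:1
}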
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
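The namedTag case above creates the *types.Named with a nil underlying type, records it, and only then reads the underlying type; that ordering is what lets recursive types resolve back to the same object. A minimal sketch of the two-step construction (package path and type are illustrative):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")
	obj := types.NewTypeName(token.NoPos, pkg, "List", nil)
	named := types.NewNamed(obj, nil, nil) // underlying type not known yet

	// A recursive underlying type that refers back to the named type:
	// type List struct { next *List }
	next := types.NewVar(token.NoPos, pkg, "next", types.NewPointer(named))
	named.SetUnderlying(types.NewStruct([]*types.Var{next}, nil))

	fmt.Println(named, "->", named.Underlying())
}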
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
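Struct field names use the marker scheme written by the exporter's fieldName and read back by the importer's fieldName just below: "" for an exported anonymous field, "?" for an unexported anonymous field (which forces the package to be written), "@" plus the name for an anonymous field whose name is an alias, and the plain name otherwise. A small sketch of the encoder side; everything here is illustrative, not part of the vendored code.

package main

import (
	"fmt"
	"go/ast"
)

// encodeFieldName returns the strings an exporter would emit for a field,
// given its name, whether it is anonymous, and the name of its base type.
func encodeFieldName(name string, anonymous bool, baseName string) []string {
	if anonymous && name == baseName {
		if ast.IsExported(name) {
			return []string{""} // case 1: nothing else needed
		}
		return []string{"?"} // case 2: the package follows in the real format
	}
	if anonymous {
		return []string{"@", name} // case 3: alias, the name is written explicitly
	}
	return []string{name} // ordinary field (package follows if unexported)
}

func main() {
	fmt.Printf("%q\n", encodeFieldName("Buffer", true, "Buffer")) // [""]
	fmt.Printf("%q\n", encodeFieldName("buffer", true, "buffer")) // ["?"]
	fmt.Printf("%q\n", encodeFieldName("B", true, "Buffer"))      // ["@" "B"]
	fmt.Printf("%q\n", encodeFieldName("Count", false, ""))       // ["Count"]
}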
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
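The float encoding decoded above stores a non-zero constant as its sign, a binary exponent, and the big-endian bytes of an integer mantissa, so the value is mant * 2^(exp - bitlen(mant)). A small round-trip sketch using math/big directly (the constant 6.25 is illustrative):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	var f big.Float
	f.SetFloat64(6.25) // 6.25 == 0.78125 * 2^3

	var m big.Float
	exp := f.MantExp(&m)               // 0.5 <= m < 1, exp == 3
	m.SetMantExp(&m, int(m.MinPrec())) // scale the mantissa up to an integer
	mant, _ := m.Int(nil)              // mant == 25, written big-endian as bytes

	fmt.Printf("sign=%d exp=%d mant=%v (% x)\n", f.Sign(), exp, mant, mant.Bytes())

	// The reader reverses it: value = mant * 2^(exp - bitlen(mant)).
	back := new(big.Float).SetMantExp(new(big.Float).SetInt(mant), exp-mant.BitLen())
	fmt.Println("reconstructed:", back) // 6.25
}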
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 000000000..f33dc5613 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. 
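predeclared above seeds the type table so exporter and importer agree on one fixed ordering; a basic type is then encoded as nothing more than its small table index. A shortened, illustrative sketch (the real list continues with all basic types, the byte/rune aliases, error, the untyped kinds, unsafe.Pointer, and Invalid):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	predeclared := []types.Type{
		types.Typ[types.Bool],
		types.Typ[types.Int],
		types.Typ[types.Int8],
		// ... truncated for illustration
	}

	typIndex := make(map[types.Type]int)
	for i, t := range predeclared {
		typIndex[t] = i
	}
	fmt.Println("int is encoded as index", typIndex[types.Typ[types.Int]]) // 1
}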
The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000..e8cba6b23 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
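FindExportData above skips the textual object header until it reaches a line starting with "$$", optionally positioning at __.PKGDEF first when the input is an archive. A runnable sketch of the non-archive path; the object header in main is fabricated purely for illustration.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func findExportHeader(r *bufio.Reader) (string, error) {
	line, err := r.ReadSlice('\n')
	if err != nil {
		return "", err
	}
	if !strings.HasPrefix(string(line), "go object ") {
		return "", fmt.Errorf("not a Go object file")
	}
	// Skip header lines until one begins with '$'.
	for line[0] != '$' {
		if line, err = r.ReadSlice('\n'); err != nil {
			return "", err
		}
	}
	return string(line), nil
}

func main() {
	obj := "go object linux amd64 go1.17\nbuild id \"xyz\"\n$$B\n<binary export data>$$\n"
	hdr, err := findExportHeader(bufio.NewReader(strings.NewReader(obj)))
	fmt.Printf("hdr=%q err=%v\n", hdr, err) // hdr="$$B\n" err=<nil>
}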
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
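The parser above drives text/scanner over the old textual export format, with only tab and space treated as whitespace so newlines remain visible to the parser. A small sketch of that scanner setup; the sample export line is fabricated for illustration.

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader(`func @"fmt".Println (? ...interface {}) (? int, ? error)`))
	s.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
	s.Whitespace = 1<<'\t' | 1<<' '

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%-10s %q\n", scanner.TokenString(tok), s.TokenText())
	}
}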
+// +func (p *parser) parsePackageID() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageID() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return newInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageID(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 000000000..d2fc8b6fa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,781 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. 
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + return iexportCommon(out, fset, false, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return iexportCommon(out, fset, true, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*types.Package) (err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := iexporter{ + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. + var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + io.Copy(out, &p.data0) + + return nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. 
+ if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + localpkg *types.Package + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. 
+ w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + 
w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. 
+// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. 
+ if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 000000000..8ed8bc62d --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,676 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, imports, data, false, path) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, imports, data, true, "") +} + +func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { + const currentVersion = 1 + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + bundleVersion := r.uint64() + switch bundleVersion { + case bundleVersion: + default: + errorf("unknown bundle format version %d", bundleVersion) + } + } + + version = int64(r.uint64()) + switch version { + case currentVersion, 0: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + version: int(version), + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + return pkgs, nil +} + +type iimporter struct { + ipath string + version int + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + // Ensure that the imported Kind is Float, else this constant may run into + // bitsize limits on overlarge integers. Eventually we can instead adopt + // the approach of CL 288632, but that CL relies on go/constant APIs that + // were introduced in go1.13. + // + // TODO(rFindley): sync the logic here with tip Go once we no longer + // support go1.12. 
+ x = constant.ToFloat(x) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. 
+ var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 000000000..8b163e3d0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 000000000..49984f40f --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 000000000..18a002f82 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "go/types" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +var debug = false + +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return nil, enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, friendlyErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 000000000..4bfe28a51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,221 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees.
+ +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) 
+ +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. 
+Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 000000000..7242a0a7d --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + exec "golang.org/x/sys/execabs" + "os" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. 
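As an illustration of the driver protocol described in this comment, below is a rough, hedged sketch of a trivial external driver. It assumes only what the surrounding text and the driverRequest struct state: the request arrives as JSON on standard input, the patterns arrive in argv, and a JSON response goes to standard output. The full response shape is defined by driverResponse in packages.go, so the Roots and Packages field names used here are an assumption, and the sketch simply reports an empty result. Such a binary is selected by pointing GOPACKAGESDRIVER at it (or naming it gopackagesdriver on the PATH); GOPACKAGESDRIVER=off disables external drivers.

```go
// A deliberately trivial external driver: decode the request, ignore the
// patterns, and answer with an empty result.
package main

import (
	"encoding/json"
	"os"
)

// request mirrors the driverRequest struct defined in external.go.
type request struct {
	Mode       int               `json:"mode"`
	Env        []string          `json:"env"`
	BuildFlags []string          `json:"build_flags"`
	Tests      bool              `json:"tests"`
	Overlay    map[string][]byte `json:"overlay"`
}

func main() {
	var req request
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}
	patterns := os.Args[1:] // the load patterns, passed through in argv
	_ = patterns            // a real driver would resolve these against its build system
	// Field names assumed from driverResponse in packages.go.
	json.NewEncoder(os.Stdout).Encode(map[string]interface{}{
		"Roots":    []string{},
		"Packages": []interface{}{},
	})
}
```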
See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. + +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 000000000..0e1e7f11f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1099 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/xerrors" +) + +// debug controls verbose logging. 
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + // types.SizesFor always returns nil or a *types.StdSizes. 
+ response.dr.Sizes, _ = sizes.(*types.StdSizes) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) 
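The "file=" query form extracted above is visible to callers as an ordinary pattern. A brief, hedged fragment (not a complete program; the path is hypothetical):

```go
// Load whichever package contains the named file, using the "file=" query
// form extracted above.
cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
pkgs, err := packages.Load(cfg, "file=/home/user/project/main.go")
_ = pkgs
_ = err
```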
+ if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? 
+ for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. 
This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
+ } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. 
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { + response.Packages = append(response.Packages, pkg) + } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) + + return &response, nil +} + +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. +func (state *golistState) getPkgPath(dir string) (string, bool, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", false, err + } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. + if !strings.HasPrefix(absDir, rdir) { + continue + } + // TODO(matloob): This doesn't properly handle symlinks. 
+ r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true, nil + } + return filepath.ToSlash(r), true, nil + } + return "", false, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, xerrors.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? 
+ if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. 
+ // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. 
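For callers, overlays are supplied through Config.Overlay; on Go 1.16 and later, invokeGo forwards them to the go command via a temporary -overlay file whose JSON shape matches OverlayJSON above, i.e. {"replace": {"<original path>": "<temp copy>"}}. A hedged fragment (not a complete program; paths and the package name are hypothetical):

```go
cfg := &packages.Config{
	Mode: packages.NeedName | packages.NeedSyntax,
	Dir:  "/home/user/project",
	Overlay: map[string][]byte{
		// The contents stand in for a file that need not exist on disk.
		"/home/user/project/extra.go": []byte("package project\n\nfunc Extra() {}\n"),
	},
}
pkgs, err := packages.Load(cfg, "./...")
_ = pkgs
_ = err
```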
+ if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. + filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 000000000..9576b472f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. 
+ havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. + if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. 
+ if pkg == nil || pkg.ID == "command-line-arguments" { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } + if !ok { + break + } + var forTest string // only set for x tests + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + forTest = pkgPath + pkgPath += "_test" + } + id := pkgPath + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + } + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. 
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. 
+ var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} + +// commonDir returns the directory that all files are in, "" if files is empty, +// or an error if they aren't in the same directory. +func commonDir(files []string) (string, error) { + seen := make(map[string]bool) + for _, f := range files { + seen[filepath.Dir(f)] = true + } + if len(seen) > 1 { + return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) + } + for k := range seen { + // seen has only one element; return it. + return k, nil + } + return "", nil // no files +} + +// It is possible that the files in the disk directory dir have a different package +// name from newName, which is deduced from the overlays. If they all have a different +// package name, and they all have the same package name, then that name becomes +// the package name. 
+// It returns true if it changes the package name, false otherwise. +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { + names := make(map[string]int) + for _, p := range pkgsOfDir { + names[p.Name]++ + } + if len(names) != 1 { + // some files are in different packages + return + } + var oldName string + for k := range names { + oldName = k + } + if newName == oldName { + return + } + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 000000000..7ea37e7ee --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportsFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportsFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 000000000..8a1a2d681 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1239 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. 
+// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportsFile adds ExportFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. 
+ // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. 
+ // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. 
+ IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. 
+type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. 
+ // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. 
+ (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + lpkg := &loaderPackage{ + Package: pkg, + needtypes: needtypes, + needsrc: needsrc, + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. 
+ if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? 
+ errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. 
+ illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. 
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. 
+func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { + // If NeedTypesInfo, go/packages needs to do typechecking itself so it can + // associate type info with the AST. To do so, we need the export data + // for dependencies, which means we need to ask for the direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { + // With NeedDeps we need to load at least direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 000000000..a1dcc40b7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 0cebac6e6..713043680 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -14,7 +14,7 @@ import ( // It returns the X in Go 1.X. func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" - inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`} + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") // Unset any unneeded flags, and remove them from BuildFlags, if they're // present. 
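The vendored files above form the go/packages surface this repository now builds against: the LoadMode bits, Config, Load, Visit, and PrintErrors. As a rough sketch only (the "./..." pattern and the printed fields are illustrative, not how this repository necessarily calls the package), a typical consumer of that API looks like this:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Request names, files, syntax, and type information. impliedLoadMode
	// adds anything these bits depend on (e.g. NeedImports) internally.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles |
			packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "./...") // pattern is illustrative
	if err != nil {
		log.Fatal(err) // invalid pattern or driver failure
	}
	// Per-package problems are recorded in Package.Errors, not returned by Load.
	if n := packages.PrintErrors(pkgs); n > 0 {
		log.Fatalf("%d packages had errors", n)
	}
	for _, pkg := range pkgs {
		fmt.Printf("%s: %d parsed files\n", pkg.PkgPath, len(pkg.Syntax))
	}
}

Note that, per the Load documentation vendored above, Load only returns an error for invalid patterns or a failed driver query; per-package problems land in Package.Errors, which is why PrintErrors is checked afterwards.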
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 000000000..9702094c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 000000000..9fc6b4beb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,25 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams provides functions to work indirectly with type parameter +// data stored in go/ast and go/types objects, while these API are guarded by a +// build constraint. +// +// This package exists to make it easier for tools to work with generic code, +// while also compiling against older Go versions. +package typeparams + +import ( + "go/ast" + "go/token" +) + +// A IndexExprData holds data from both ast.IndexExpr and the new +// ast.MultiIndexExpr, which was introduced in Go 1.18. +type IndexExprData struct { + X ast.Expr // expression + Lbrack token.Pos // position of "[" + Indices []ast.Expr // index expressions + Rbrack token.Pos // position of "]" +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go new file mode 100644 index 000000000..e975e476f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go @@ -0,0 +1,93 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !typeparams || !go1.18 +// +build !typeparams !go1.18 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with typeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false + +// GetIndexExprData extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting IndexExprData will have exactly one +// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a +// variable number of index expressions. +// +// For nodes that don't represent index expressions, GetIndexExprData returns +// nil. 
+func GetIndexExprData(n ast.Node) *IndexExprData { + if e, _ := n.(*ast.IndexExpr); e != nil { + return &IndexExprData{ + X: e.X, + Lbrack: e.Lbrack, + Indices: []ast.Expr{e.Index}, + Rbrack: e.Rbrack, + } + } + return nil +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. +func ForTypeDecl(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(*ast.FuncDecl) *ast.FieldList { + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(*types.Signature) []*types.TypeName { + return nil +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(*types.Interface) bool { + return false +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(*types.Interface) bool { + return false +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(*types.Named) []*types.TypeName { + return nil +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(*types.Named) []types.Type { + return nil +} + +// InitInferred initializes info to record inferred type information. +func InitInferred(*types.Info) { +} + +// GetInferred extracts inferred type information from info for e. +// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. +func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) { + return nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go new file mode 100644 index 000000000..2e9421024 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go @@ -0,0 +1,125 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build typeparams && go1.18 +// +build typeparams,go1.18 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with notypeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true + +// GetIndexExprData extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting IndexExprData will have exactly one +// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a +// variable number of index expressions. +// +// For nodes that don't represent index expressions, GetIndexExprData returns +// nil. +func GetIndexExprData(n ast.Node) *IndexExprData { + switch e := n.(type) { + case *ast.IndexExpr: + return &IndexExprData{ + X: e.X, + Lbrack: e.Lbrack, + Indices: []ast.Expr{e.Index}, + Rbrack: e.Rbrack, + } + case *ast.MultiIndexExpr: + return (*IndexExprData)(e) + } + return nil +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. 
+func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList { + return n.TParams +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList { + if n.Type != nil { + return n.Type.TParams + } + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(sig *types.Signature) []*types.TypeName { + return tparamsSlice(sig.TParams()) +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(iface *types.Interface) bool { + return iface.IsComparable() +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(iface *types.Interface) bool { + return iface.IsConstraint() +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(named *types.Named) []*types.TypeName { + return tparamsSlice(named.TParams()) +} + +func tparamsSlice(tparams *types.TypeParams) []*types.TypeName { + if tparams.Len() == 0 { + return nil + } + result := make([]*types.TypeName, tparams.Len()) + for i := 0; i < tparams.Len(); i++ { + result[i] = tparams.At(i) + } + return result +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(named *types.Named) []types.Type { + ntargs := named.NumTArgs() + if ntargs == 0 { + return nil + } + + targs := make([]types.Type, ntargs) + for i := 0; i < ntargs; i++ { + targs[i] = named.TArg(i) + } + + return targs +} + +// InitInferred initializes info to record inferred type information. +func InitInferred(info *types.Info) { + info.Inferred = make(map[ast.Expr]types.Inferred) +} + +// GetInferred extracts inferred type information from info for e. +// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. +func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) { + if info.Inferred == nil { + return nil, nil + } + inf := info.Inferred[e] + return inf.TArgs, inf.Sig +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 000000000..fa2834e2a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1368 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. 
For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. 
+ // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNil occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNil + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. + // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. + // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. + // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. + // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." 
+ // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." + // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. 
+ // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. 
+ // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. 
+ // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. + // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // information on the type requirements for the copy built-in. + // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. 
+ // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T. + // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. + // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. 
+ // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. + // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. 
+ // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. + // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. + // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo +) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 000000000..3e5842a5f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,153 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNil-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = x[WrongArgCount-126] + _ = 
x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] +} + +const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGo" + +var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903} + +func (i ErrorCode) String() string { + i -= 1 + if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { + return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 000000000..c3e1a397d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(terr) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml index 70ffe89d5..6d03f4d36 100644 --- a/vendor/google.golang.org/appengine/.travis.yml +++ b/vendor/google.golang.org/appengine/.travis.yml @@ -10,8 +10,6 @@ script: matrix: include: - - go: 1.8.x - env: GOAPP=true - go: 1.9.x env: GOAPP=true - go: 1.10.x diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index a6ec19e14..721053c20 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -58,8 +58,11 @@ var ( apiHTTPClient = &http.Client{ Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + MaxIdleConns: 1000, + MaxIdleConnsPerHost: 10000, + IdleConnTimeout: 90 * time.Second, }, } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index bccee044b..191bea48c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/annotations.proto @@ -23,7 +23,6 @@ package annotations import ( reflect "reflect" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 15b02156d..66fdb650f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/client.proto @@ -23,7 +23,6 @@ package annotations import ( reflect "reflect" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 07d8486db..164e0df0b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.27.1 // protoc v3.12.2 // source: google/api/field_behavior.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // An indicator of the behavior of a given field (for example, that a field // is required in requests, or given as output but ignored as input). // This **does not** change the behavior in protocol buffers itself; it only @@ -78,6 +73,11 @@ const ( // in any arbitrary order, rather than the order the user originally // provided. Additionally, the list's order may or may not be stable. FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 ) // Enum value maps for FieldBehavior. 
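Aside (not part of the vendored change): the hunk above adds the NON_EMPTY_DEFAULT field behavior to the google.api.field_behavior extension. A minimal, hypothetical sketch of how a consumer might check a FieldOptions message for the new value with the protobuf-go v2 API follows; the helper hasNonEmptyDefault and the standalone main are illustrative only.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// hasNonEmptyDefault reports whether the field options carry the
// NON_EMPTY_DEFAULT behavior introduced in this revision of the
// annotations package.
func hasNonEmptyDefault(opts *descriptorpb.FieldOptions) bool {
	if opts == nil {
		return false
	}
	behaviors, _ := proto.GetExtension(opts, annotations.E_FieldBehavior).([]annotations.FieldBehavior)
	for _, b := range behaviors {
		if b == annotations.FieldBehavior_NON_EMPTY_DEFAULT {
			return true
		}
	}
	return false
}

func main() {
	opts := &descriptorpb.FieldOptions{}
	proto.SetExtension(opts, annotations.E_FieldBehavior,
		[]annotations.FieldBehavior{annotations.FieldBehavior_NON_EMPTY_DEFAULT})
	fmt.Println(hasNonEmptyDefault(opts)) // prints: true
}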
@@ -90,6 +90,7 @@ var ( 4: "INPUT_ONLY", 5: "IMMUTABLE", 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -99,6 +100,7 @@ var ( "INPUT_ONLY": 4, "IMMUTABLE": 5, "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, } ) @@ -167,7 +169,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0x8f, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -176,20 +178,22 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, - 0x06, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, + 
0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 6c56d6192..4f34ab73c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/http.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 32d5de97f..3571ad634 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/resource.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // A description of the historical or future-looking state of the // resource pattern. 
type ResourceDescriptor_History int32 diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 2ca10f836..b47efe82b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/httpbody.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Message that represents an arbitrary HTTP body. It should only be used for // payload formats that can't be represented as JSON, such as raw binary or // an HTML page. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index e79a53884..f34a38e4e 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/rpc/status.proto package status @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is // used by [gRPC](https://github.com/grpc). 
Each `Status` message contains diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 5847d94e5..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 3949a683f..0e6ae69a5 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -136,6 +136,6 @@ errors. 
[Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 4cc7f9159..dd8397963 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -43,7 +43,8 @@ type ccBalancerWrapper struct { cc *ClientConn balancerMu sync.Mutex // synchronizes calls to the balancer balancer balancer.Balancer - scBuffer *buffer.Unbounded + updateCh *buffer.Unbounded + closed *grpcsync.Event done *grpcsync.Event mu sync.Mutex @@ -53,7 +54,8 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), subConns: make(map[*acBalancerWrapper]struct{}), } @@ -67,35 +69,53 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { + case t := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + switch u := t.(type) { + case *scStateUpdate: + ccb.balancerMu.Lock() + ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) + ccb.balancerMu.Unlock() + case *acBalancerWrapper: + ccb.mu.Lock() + if ccb.subConns != nil { + delete(ccb.subConns, u) + ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) + } + ccb.mu.Unlock() + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + } + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { + if ccb.closed.HasFired() { + ccb.balancerMu.Lock() ccb.balancer.Close() + ccb.balancerMu.Unlock() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil ccb.mu.Unlock() + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + ccb.done.Fire() + // Fire done before removing the addr conns. We can safely unblock + // ccb.close and allow the removeAddrConns to happen + // asynchronously. 
for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return } } } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() + ccb.closed.Fire() + <-ccb.done.Done() } func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { @@ -109,7 +129,7 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, @@ -150,17 +170,10 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock + // during switchBalancer() if the old balancer calls RemoveSubConn() in its + // Close(). + ccb.updateCh.Put(sc) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 24109264f..b2bccfed1 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -711,7 +711,12 @@ func (cc *ClientConn) switchBalancer(name string) { return } if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. + cc.mu.Unlock() cc.balancerWrapper.close() + cc.mu.Lock() } builder := balancer.Get(name) @@ -1046,12 +1051,12 @@ func (cc *ClientConn) Close() error { cc.blockingpicker.close() - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1424,26 +1429,14 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() - } - return nil, false + return nil } // tearDown starts to tear down the addrConn. 
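The ccBalancerWrapper and ClientConn hunks above share one idea: subconn removal is no longer done inline under the wrapper's lock but is posted to the wrapper's unbounded update channel and applied by the single watcher goroutine, which is why switchBalancer can now release cc.mu while the old balancer's Close() runs. Below is a minimal, self-contained sketch of that single-owner-goroutine pattern; the types and names are illustrative stand-ins, not the grpc-go implementation.

```go
// Sketch only: one goroutine owns the state; callers enqueue updates instead
// of locking, so a callback that already holds a caller-side lock cannot
// deadlock against the owner.
package main

import "fmt"

type update struct {
	key   string
	value int
}

type owner struct {
	updates chan update
	closed  chan struct{} // signals the watcher to stop (like ccb.closed)
	done    chan struct{} // closed once the watcher has exited (like ccb.done)
	state   map[string]int // touched only by the watcher goroutine
}

func newOwner() *owner {
	o := &owner{
		updates: make(chan update, 16),
		closed:  make(chan struct{}),
		done:    make(chan struct{}),
		state:   make(map[string]int),
	}
	go o.watcher()
	return o
}

// watcher is the single goroutine that applies updates, so Put never needs a
// lock that a balancer-style callback might already hold.
func (o *owner) watcher() {
	defer close(o.done)
	for {
		select {
		case u := <-o.updates:
			o.state[u.key] = u.value
		case <-o.closed:
			return
		}
	}
}

// Put enqueues an update; it never blocks forever once Close has been called.
func (o *owner) Put(key string, value int) {
	select {
	case o.updates <- update{key, value}:
	case <-o.closed:
	}
}

// Close mirrors ccBalancerWrapper.close: signal shutdown, then wait for the
// watcher to finish before returning.
func (o *owner) Close() {
	close(o.closed)
	<-o.done
}

func main() {
	o := newOwner()
	o.Put("subconns", 3)
	o.Close()
	fmt.Println("watcher stopped")
}
```

The closed/done pair mirrors the wrapper's two events: closed asks the watcher to stop, while done lets close() block until the watcher has actually drained and returned.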
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 7d7a3056b..c2fdd58b3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool - writeStartOnce sync.Once - writeTicker *time.Ticker + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca..740f83c2b 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,37 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. 
func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 710223b8d..e5c6513ed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. @@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) } diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index f63a01376..45532f8ae 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -296,7 +296,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -310,10 +310,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -347,8 +347,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -389,9 +388,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. 
- ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -407,7 +406,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() return nil, ErrConnClosing } } @@ -432,6 +430,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e6..1c3459c2b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 48c5e52ed..0cd6da1e7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "strings" "sync" @@ -241,7 +242,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. + deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -399,11 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. 
+ t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -608,26 +616,39 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. +type NewStreamError struct { Err error + + DoNotRetry bool + PerformedIO bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. +// streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + defer func() { + if err != nil { + nse, ok := err.(*NewStreamError) + if !ok { + nse = &NewStreamError{Err: err} + } + if len(t.perRPCCreds) > 0 || callHdr.Creds != nil { + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + nse.PerformedIO = true + } + err = nse + } + }() ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, err } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -733,7 +754,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} } firstTry = false select { @@ -878,12 +899,18 @@ func (t *http2Client) Close(err error) { // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status if len(goAwayDebugMessage) > 0 { - err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -1221,7 +1248,11 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } - t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { @@ -1254,11 +1285,124 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. 
- state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1293,9 +1437,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1308,9 +1452,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 11be5599c..e3799d50a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -102,11 +102,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState @@ -125,9 +125,14 @@ type http2Server struct { connectionID uint64 } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a non-nil transport and a nil-error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -266,6 +271,13 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // Check the validity of client preface. 
preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Skipping the error here will help + // reduce log clutter. + if err == io.EOF { + return nil, nil + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -295,6 +307,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } } t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() @@ -304,37 +317,92 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. 
s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -347,14 +415,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. - if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } } t.mu.Lock() if t.state != reachable { @@ -383,10 +451,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { + if httpMethod != http.MethodPost { t.mu.Unlock() if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) } t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -399,7 +467,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() if logger.V(logLevel) { logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) @@ -437,7 +505,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -1004,12 +1072,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1063,11 +1131,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. 
-func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1075,7 +1143,9 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } @@ -1087,7 +1157,6 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. @@ -1152,17 +1221,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index c7dee140c..d8247bcdf 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -39,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -96,53 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. 
It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -180,14 +132,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,168 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
-func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) - } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? - d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); 
err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 96967428b..7bb53cff1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. package networktype import ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6cc1031fd..141981264 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -532,12 +532,6 @@ type ServerConfig struct { HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -694,7 +688,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close() // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index e4cbea917..3604c7819 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -93,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -107,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -116,9 +123,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. 
func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -145,8 +160,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -159,20 +174,34 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // -// This is intended for gRPC-internal use ONLY. +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -182,16 +211,23 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - out := raw.md.Copy() + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) + out[key] = v + } for _, added := range raw.added { if len(added)%2 == 1 { panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a58174b6f..0878ada9d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -147,7 +147,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index 3679d702a..f6e7b5ae3 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -27,7 +27,9 @@ import ( // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, scheme: scheme, } } @@ -35,11 +37,17 @@ func NewBuilderWithScheme(scheme string) *Resolver { // Resolver is also a resolver builder. // It's build() function always returns itself. type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. ResolveNowCallback func(resolver.ResolveNowOptions) - scheme string + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string // Fields actually belong to the resolver. CC resolver.ClientConn @@ -54,6 +62,7 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.CC = cc if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) @@ -72,9 +81,16 @@ func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { } // Close is a noop for Resolver. -func (*Resolver) Close() {} +func (r *Resolver) Close() { + r.CloseCallback() +} // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.CC.UpdateState(s) } + +// ReportError calls CC.ReportError. 
+func (r *Resolver) ReportError(err error) { + r.CC.ReportError(err) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 4118de571..2c47cd54f 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -39,6 +39,8 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -90,6 +92,8 @@ func (ccr *ccResolverWrapper) close() { } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return nil } @@ -105,6 +109,8 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -114,6 +120,8 @@ func (ccr *ccResolverWrapper) ReportError(err error) { // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -128,6 +136,8 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 6db356fa5..87987a2e6 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -828,26 +829,28 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0a151dee4..0251f48da 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -844,10 +844,16 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // In deployments where a gRPC server runs behind a cloud load + // balancer which performs regular TCP level health checks, the + // connection is closed immediately by the latter. Skipping the + // error here will help reduce log clutter. 
+ if err != io.EOF { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + } rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -857,6 +863,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { // Finish handshaking (HTTP2) st := s.newHTTP2Transport(conn, authInfo) if st == nil { + conn.Close() return } @@ -897,7 +904,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) @@ -1109,22 +1116,24 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + var i int + var next UnaryHandler + next = func(ctx context.Context, req interface{}) (interface{}, error) { + if i == len(interceptors)-1 { + return interceptors[i](ctx, req, info, handler) + } + i++ + return interceptors[i-1](ctx, req, info, next) + } + return next(ctx, req) } } @@ -1138,7 +1147,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1390,22 +1401,24 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + var i int + var next StreamHandler + next = func(srv interface{}, ss ServerStream) error { + if i == len(interceptors)-1 { + return interceptors[i](srv, ss, info, handler) + } + i++ + return interceptors[i-1](srv, ss, info, next) + } + return next(srv, ss) } } @@ -1418,7 +1431,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1521,6 +1536,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1588,7 +1605,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7..a5ebeeb69 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -45,6 +45,10 @@ type Begin struct { BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool } // IsClient indicates if the stats information is from client side. 
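The stats.go hunk above shows grpc v1.40 extending stats.Begin with IsClientStream and IsServerStream, which the server and client now populate from the stream descriptor when an RPC starts. A stats.Handler can read those fields to tell unary, client-streaming, server-streaming, and bidirectional RPCs apart. The sketch below is a minimal, illustrative handler (the rpcKindLogger type, package name, and log messages are hypothetical, not part of the vendored code); it would be registered on a server with the grpc.StatsHandler option.

package statsdemo

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// rpcKindLogger is a minimal, hypothetical stats.Handler that only inspects
// the Begin event; register it with grpc.NewServer(grpc.StatsHandler(rpcKindLogger{})).
type rpcKindLogger struct{}

func (rpcKindLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (rpcKindLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (rpcKindLogger) HandleConn(context.Context, stats.ConnStats) {}

func (rpcKindLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	b, ok := s.(*stats.Begin)
	if !ok {
		return // classify the RPC kind only at Begin time
	}
	switch {
	case b.IsClientStream && b.IsServerStream:
		log.Print("bidirectional streaming RPC started")
	case b.IsClientStream:
		log.Print("client-streaming RPC started")
	case b.IsServerStream:
		log.Print("server-streaming RPC started")
	default:
		log.Print("unary RPC started")
	}
}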
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 1f3e70d2c..e224af12d 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -295,9 +295,11 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) beginTime = time.Now() begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + IsClientStream: desc.ClientStreams, + IsServerStream: desc.ServerStreams, } sh.HandleRPC(ctx, begin) } @@ -419,12 +421,9 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. - return err - } - return toRPCErr(err) + // Return without converting to an RPC error so retry code can + // inspect. + return err } cs.attempt.s = s cs.attempt.p = &parser{r: s} @@ -523,19 +522,28 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true + // Error from NewClientStream. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected, but assume no I/O was performed and the RPC is not + // fatal, so retry indefinitely. + return nil } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. + + // Unwrap and convert error. + err = toRPCErr(nse.Err) + + // Never retry DoNotRetry errors, which indicate the RPC should not be + // retried due to max header list size violation, etc. + if nse.DoNotRetry { + return err + } + + // In the event of a non-IO operation error from NewStream, we never + // attempted to write anything to the wire, so we can retry + // indefinitely. + if !nse.PerformedIO { return nil } } @@ -544,6 +552,7 @@ func (cs *clientStream) shouldRetry(err error) error { return err } // Wait for the trailers. + unprocessed := false if cs.attempt.s != nil { <-cs.attempt.s.Done() unprocessed = cs.attempt.s.Unprocessed() @@ -632,7 +641,7 @@ func (cs *clientStream) shouldRetry(err error) error { // Returns nil if a retry was performed and succeeded; error otherwise. func (cs *clientStream) retryLocked(lastErr error) error { for { - cs.attempt.finish(lastErr) + cs.attempt.finish(toRPCErr(lastErr)) if err := cs.shouldRetry(lastErr); err != nil { cs.commitAttemptLocked() return err @@ -659,7 +668,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. 
+ return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bfe5cf887..e3510e10f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.38.0" +const Version = "1.40.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 1a0dbd7ee..5eaa8b05d 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -32,26 +32,14 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.14.0 diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 8fb1d9e08..179d6e8fc 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -744,9 +744,6 @@ func (d decoder) skipValue() error { // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. - if err := d.skipValue(); err != nil { - return err - } } } } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index aa66bdd06..da289ccce 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -263,3 +263,8 @@ func (e *Encoder) Snapshot() encoderState { func (e *Encoder) Reset(es encoderState) { e.encoderState = es } + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 3759b010c..029feeefd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -440,6 +440,13 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { if !ok { return piface.MergeOutput{} } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. 
+ // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } b, err := marshaler.Marshal() if err != nil { return piface.MergeOutput{} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 5879131da..14e774fb2 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,8 +52,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 26 - Patch = 0 + Minor = 27 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 66dcbcd0d..59f024c44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -94,7 +94,8 @@ type Files struct { // Note that enum values are in the top-level since that are in the same // scope as the parent enum. descsByName map[protoreflect.FullName]interface{} - filesByPath map[string]protoreflect.FileDescriptor + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int } type packageDescriptor struct { @@ -117,17 +118,16 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } - r.filesByPath = make(map[string]protoreflect.FileDescriptor) + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) } path := file.Path() - if prev := r.filesByPath[path]; prev != nil { + if prev := r.filesByPath[path]; len(prev) > 0 { r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err } - return err } for name := file.Package(); name != ""; name = name.Parent() { @@ -168,7 +168,8 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { r.descsByName[d.FullName()] = d }) - r.filesByPath[path] = file + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ return nil } @@ -308,6 +309,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // // This returns (nil, NotFound) if not found. +// This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { return nil, NotFound @@ -316,13 +318,19 @@ func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) globalMutex.RLock() defer globalMutex.RUnlock() } - if fd, ok := r.filesByPath[path]; ok { - return fd, nil + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) } - return nil, NotFound } -// NumFiles reports the number of registered files. +// NumFiles reports the number of registered files, +// including duplicate files with the same name. 
func (r *Files) NumFiles() int { if r == nil { return 0 @@ -331,10 +339,11 @@ func (r *Files) NumFiles() int { globalMutex.RLock() defer globalMutex.RUnlock() } - return len(r.filesByPath) + return r.numFiles } // RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. // The iteration order is undefined. func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { if r == nil { @@ -344,9 +353,11 @@ func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { globalMutex.RLock() defer globalMutex.RUnlock() } - for _, file := range r.filesByPath { - if !f(file) { - return + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } } } } diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index f77239fc3..abe4ab511 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -43,7 +43,6 @@ package descriptorpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -829,15 +828,6 @@ func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} } -var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ExtensionRangeOptions -} - func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1520,15 +1510,6 @@ func (*FileOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} } -var extRange_FileOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FileOptions -} - func (x *FileOptions) GetJavaPackage() string { if x != nil && x.JavaPackage != nil { return *x.JavaPackage @@ -1776,15 +1757,6 @@ func (*MessageOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} } -var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MessageOptions -} - func (x *MessageOptions) GetMessageSetWireFormat() bool { if x != nil && x.MessageSetWireFormat != nil { return *x.MessageSetWireFormat @@ -1930,15 +1902,6 @@ func (*FieldOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} } -var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FieldOptions -} - func (x *FieldOptions) GetCtype() FieldOptions_CType { if x != nil && x.Ctype != nil { return *x.Ctype @@ -2030,15 +1993,6 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } -var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_OneofOptions -} - func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2101,15 +2055,6 @@ func (*EnumOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} } -var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumOptions -} - func (x *EnumOptions) GetAllowAlias() bool { if x != nil && x.AllowAlias != nil { return *x.AllowAlias @@ -2183,15 +2128,6 @@ func (*EnumValueOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} } -var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumValueOptions -} - func (x *EnumValueOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2258,15 +2194,6 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } -var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ServiceOptions -} - func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2335,15 +2262,6 @@ func (*MethodOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} } -var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MethodOptions -} - func (x *MethodOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go index f7369471a..d35688285 100644 --- a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go index 4f3dd45be..8234b322f 100644 --- a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go index 8fc1cde0a..f96e8a443 100644 --- a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index ff544c3a3..29873b796 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -64,6 +64,7 @@ type Rule struct { } // ScopeType specifies a scope for a Rule. +// +enum type ScopeType string const ( @@ -77,6 +78,7 @@ const ( ) // FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. +// +enum type FailurePolicyType string const ( @@ -87,6 +89,7 @@ const ( ) // MatchPolicyType specifies the type of match policy. +// +enum type MatchPolicyType string const ( @@ -97,6 +100,7 @@ const ( ) // SideEffectClass specifies the types of side effects a webhook may have. +// +enum type SideEffectClass string const ( @@ -450,6 +454,7 @@ type MutatingWebhook struct { } // ReinvocationPolicyType specifies what type of policy the admission hook uses. +// +enum type ReinvocationPolicyType string const ( @@ -476,6 +481,7 @@ type RuleWithOperations struct { } // OperationType specifies an operation for a request. +// +enum type OperationType string // The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
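The zz_generated files above pick up the Go 1.17 dual build-constraint form, and admissionregistration/v1/types.go gains // +enum markers on its string-typed enums. A minimal sketch of both conventions follows, assuming a throwaway package name (example) that is not part of the vendored tree: gofmt keeps the //go:build line and the legacy // +build line in sync, and the +enum tag is a comment marker consumed by Kubernetes code generators rather than by the Go compiler.

//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Package example is illustrative only; it mirrors the constraint and marker
// style used by the regenerated k8s.io/api files above.
package example

// FailurePolicyType shows the string-typed "enum" pattern; the +enum marker
// below has no effect at compile time and is read only by generators.
// +enum
type FailurePolicyType string

const (
	// Ignore means that an error calling the webhook is ignored.
	Ignore FailurePolicyType = "Ignore"
	// Fail means that an error calling the webhook causes the admission to fail.
	Fail FailurePolicyType = "Fail"
)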
diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index 3afb74673..cff7377a5 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c4570d031..23ddb1fa2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index bfd93a05a..09a92f476 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go index 7e82a9070..44dffa751 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 0a15aff4d..81d51bd58 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -748,10 +748,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_e1014cab6f31e43b, []int{25} + return fileDescriptor_e1014cab6f31e43b, []int{26} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +809,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} + return fileDescriptor_e1014cab6f31e43b, []int{27} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -807,7 +837,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{28} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -858,6 +888,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") @@ -868,135 +899,142 @@ func init() { } var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2047 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x27, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xc8, 0x0d, 0x45, 0x88, 0xcb, - 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0x6d, 0xba, 0xea, 0xf7, 0x7e, 0xf5, 0x5e, - 0xd5, 0xab, 0x7a, 0xbf, 0xae, 0x1e, 0x70, 0xef, 0xf8, 0xdb, 0x9e, 0xaa, 0xdb, 0xed, 0x63, 0xff, - 0x90, 0xb8, 0x16, 0xa1, 0xc4, 0x6b, 0x0f, 0x89, 0xa5, 0xd9, 0x6e, 0x5b, 0x74, 0x60, 0x47, 0x6f, - 0x63, 0xc7, 0xf1, 0xda, 0xc3, 0xdb, 0xed, 0x01, 0xb1, 0x88, 0x8b, 0x29, 0xd1, 0x54, 0xc7, 0xb5, - 0xa9, 0x0d, 0x61, 0x80, 0x51, 0xb1, 0xa3, 0xab, 0x0c, 0xa3, 0x0e, 0x6f, 0x5f, 0xbd, 0x35, 0xd0, - 0xe9, 0x91, 0x7f, 0xa8, 0xf6, 0x6d, 0xb3, 0x3d, 0xb0, 0x07, 0x76, 0x9b, 0x43, 0x0f, 0xfd, 0x47, - 0xfc, 0x89, 0x3f, 0xf0, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0x62, 0x98, 0xbe, 0xed, 0x12, 0xc9, 0x30, - 0x57, 0xef, 0xc6, 0x18, 0x13, 0xf7, 0x8f, 0x74, 0x8b, 0xb8, 0xa3, 0xb6, 0x73, 0x3c, 0x60, 0x0d, - 0x5e, 0xdb, 0x24, 0x14, 0xcb, 0xac, 0xda, 0x45, 0x56, 0xae, 0x6f, 0x51, 0xdd, 0x24, 0x39, 0x83, - 0xd7, 0x26, 0x19, 0x78, 0xfd, 0x23, 0x62, 0xe2, 0x9c, 0xdd, 0x2b, 0x45, 0x76, 0x3e, 0xd5, 0x8d, - 0xb6, 0x6e, 0x51, 0x8f, 0xba, 0x59, 0xa3, 0xd6, 0x7f, 
0x14, 0x00, 0xbb, 0xb6, 0x45, 0x5d, 0xdb, - 0x30, 0x88, 0x8b, 0xc8, 0x50, 0xf7, 0x74, 0xdb, 0x82, 0x3f, 0x05, 0x35, 0x16, 0x8f, 0x86, 0x29, - 0xae, 0x2b, 0xd7, 0x95, 0x8d, 0x85, 0x3b, 0xdf, 0x52, 0xe3, 0x49, 0x8e, 0xe8, 0x55, 0xe7, 0x78, - 0xc0, 0x1a, 0x3c, 0x95, 0xa1, 0xd5, 0xe1, 0x6d, 0x75, 0xf7, 0xf0, 0x5d, 0xd2, 0xa7, 0x3d, 0x42, - 0x71, 0x07, 0x3e, 0x39, 0x69, 0xce, 0x8c, 0x4f, 0x9a, 0x20, 0x6e, 0x43, 0x11, 0x2b, 0xdc, 0x05, - 0x15, 0xce, 0x5e, 0xe2, 0xec, 0xb7, 0x0a, 0xd9, 0x45, 0xd0, 0x2a, 0xc2, 0x3f, 0x7b, 0xe3, 0x31, - 0x25, 0x16, 0x73, 0xaf, 0x73, 0x49, 0x50, 0x57, 0xb6, 0x30, 0xc5, 0x88, 0x13, 0xc1, 0x97, 0x41, - 0xcd, 0x15, 0xee, 0xd7, 0xcb, 0xd7, 0x95, 0x8d, 0x72, 0xe7, 0xb2, 0x40, 0xd5, 0xc2, 0xb0, 0x50, - 0x84, 0x68, 0xfd, 0x45, 0x01, 0x6b, 0xf9, 0xb8, 0xb7, 0x75, 0x8f, 0xc2, 0x77, 0x72, 0xb1, 0xab, - 0xd3, 0xc5, 0xce, 0xac, 0x79, 0xe4, 0xd1, 0xc0, 0x61, 0x4b, 0x22, 0xee, 0xb7, 0x41, 0x55, 0xa7, - 0xc4, 0xf4, 0xea, 0xa5, 0xeb, 0xe5, 0x8d, 0x85, 0x3b, 0x37, 0xd4, 0x7c, 0xee, 0xaa, 0x79, 0xc7, - 0x3a, 0x8b, 0x82, 0xb2, 0xfa, 0x16, 0x33, 0x46, 0x01, 0x47, 0xeb, 0xbf, 0x0a, 0x98, 0xdf, 0xc2, - 0xc4, 0xb4, 0xad, 0x7d, 0x42, 0x2f, 0x60, 0xd1, 0xba, 0xa0, 0xe2, 0x39, 0xa4, 0x2f, 0x16, 0xed, - 0x6b, 0x32, 0xdf, 0x23, 0x77, 0xf6, 0x1d, 0xd2, 0x8f, 0x17, 0x8a, 0x3d, 0x21, 0x6e, 0x0c, 0xdf, - 0x06, 0xb3, 0x1e, 0xc5, 0xd4, 0xf7, 0xf8, 0x32, 0x2d, 0xdc, 0xf9, 0xfa, 0xe9, 0x34, 0x1c, 0xda, - 0x59, 0x12, 0x44, 0xb3, 0xc1, 0x33, 0x12, 0x14, 0xad, 0x7f, 0x96, 0x00, 0x8c, 0xb0, 0x5d, 0xdb, - 0xd2, 0x74, 0xca, 0xf2, 0xf7, 0x75, 0x50, 0xa1, 0x23, 0x87, 0xf0, 0x69, 0x98, 0xef, 0xdc, 0x08, - 0xbd, 0xb8, 0x3f, 0x72, 0xc8, 0xc7, 0x27, 0xcd, 0xb5, 0xbc, 0x05, 0xeb, 0x41, 0xdc, 0x06, 0x6e, - 0x47, 0xfe, 0x95, 0xb8, 0xf5, 0xdd, 0xf4, 0xd0, 0x1f, 0x9f, 0x34, 0x25, 0x87, 0x85, 0x1a, 0x31, - 0xa5, 0x1d, 0x84, 0x43, 0x00, 0x0d, 0xec, 0xd1, 0xfb, 0x2e, 0xb6, 0xbc, 0x60, 0x24, 0xdd, 0x24, - 0x22, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x66, 0xd1, 0xb9, 0x2a, 0xbc, 0x80, 0xdb, 0x39, 0x36, 0x24, - 0x19, 0x01, 0xde, 0x00, 0xb3, 0x2e, 0xc1, 0x9e, 0x6d, 0xd5, 0x2b, 0x3c, 0x8a, 0x68, 0x02, 0x11, - 0x6f, 0x45, 0xa2, 0x17, 0xbe, 0x08, 0xe6, 0x4c, 0xe2, 0x79, 0x78, 0x40, 0xea, 0x55, 0x0e, 0x5c, - 0x16, 0xc0, 0xb9, 0x5e, 0xd0, 0x8c, 0xc2, 0xfe, 0xd6, 0xef, 0x15, 0xb0, 0x18, 0xcd, 0xdc, 0x05, - 0x6c, 0x95, 0x4e, 0x7a, 0xab, 0x3c, 0x7f, 0x6a, 0x9e, 0x14, 0xec, 0x90, 0xf7, 0xcb, 0x09, 0x9f, - 0x59, 0x12, 0xc2, 0x9f, 0x80, 0x9a, 0x47, 0x0c, 0xd2, 0xa7, 0xb6, 0x2b, 0x7c, 0x7e, 0x65, 0x4a, - 0x9f, 0xf1, 0x21, 0x31, 0xf6, 0x85, 0x69, 0xe7, 0x12, 0x73, 0x3a, 0x7c, 0x42, 0x11, 0x25, 0xfc, - 0x11, 0xa8, 0x51, 0x62, 0x3a, 0x06, 0xa6, 0x44, 0x6c, 0x93, 0x54, 0x7e, 0xb3, 0x74, 0x61, 0x64, - 0x7b, 0xb6, 0x76, 0x5f, 0xc0, 0xf8, 0x46, 0x89, 0xe6, 0x21, 0x6c, 0x45, 0x11, 0x0d, 0x3c, 0x06, - 0x4b, 0xbe, 0xa3, 0x31, 0x24, 0x65, 0x47, 0xf7, 0x60, 0x24, 0xd2, 0xe7, 0xe6, 0xa9, 0x13, 0x72, - 0x90, 0x32, 0xe9, 0xac, 0x89, 0x01, 0x96, 0xd2, 0xed, 0x28, 0x43, 0x0d, 0x37, 0xc1, 0xb2, 0xa9, - 0x5b, 0x88, 0x60, 0x6d, 0xb4, 0x4f, 0xfa, 0xb6, 0xa5, 0x79, 0x3c, 0x81, 0xaa, 0x9d, 0x75, 0x41, - 0xb0, 0xdc, 0x4b, 0x77, 0xa3, 0x2c, 0x1e, 0x6e, 0x83, 0xd5, 0xf0, 0x9c, 0xfd, 0x81, 0xee, 0x51, - 0xdb, 0x1d, 0x6d, 0xeb, 0xa6, 0x4e, 0xeb, 0xb3, 0x9c, 0xa7, 0x3e, 0x3e, 0x69, 0xae, 0x22, 0x49, - 0x3f, 0x92, 0x5a, 0xb5, 0x7e, 0x33, 0x0b, 0x96, 0x33, 0xa7, 0x01, 0x7c, 0x00, 0xd6, 0xfa, 0xbe, - 0xeb, 0x12, 0x8b, 0xee, 0xf8, 0xe6, 0x21, 0x71, 0xf7, 0xfb, 0x47, 0x44, 0xf3, 0x0d, 0xa2, 0xf1, - 0x15, 0xad, 0x76, 0x1a, 0xc2, 0xd7, 0xb5, 0xae, 0x14, 0x85, 0x0a, 0xac, 0xe1, 
0x0f, 0x01, 0xb4, - 0x78, 0x53, 0x4f, 0xf7, 0xbc, 0x88, 0xb3, 0xc4, 0x39, 0xa3, 0x0d, 0xb8, 0x93, 0x43, 0x20, 0x89, - 0x15, 0xf3, 0x51, 0x23, 0x9e, 0xee, 0x12, 0x2d, 0xeb, 0x63, 0x39, 0xed, 0xe3, 0x96, 0x14, 0x85, - 0x0a, 0xac, 0xe1, 0xab, 0x60, 0x21, 0x18, 0x8d, 0xcf, 0xb9, 0x58, 0x9c, 0x15, 0x41, 0xb6, 0xb0, - 0x13, 0x77, 0xa1, 0x24, 0x8e, 0x85, 0x66, 0x1f, 0x7a, 0xc4, 0x1d, 0x12, 0xed, 0xcd, 0x40, 0x03, - 0xb0, 0x42, 0x59, 0xe5, 0x85, 0x32, 0x0a, 0x6d, 0x37, 0x87, 0x40, 0x12, 0x2b, 0x16, 0x5a, 0x90, - 0x35, 0xb9, 0xd0, 0x66, 0xd3, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0xc0, 0x9a, 0xe5, 0x5e, 0xe0, 0xf2, - 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x9d, 0x7b, 0x3b, 0xe9, 0x6e, 0x94, 0xc5, - 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, 0x27, 0x48, 0xae, - 0xec, 0x64, 0x01, 0x28, 0x6f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, 0x3e, 0x76, 0x6d, - 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, 0x09, 0x1f, 0x02, - 0xd0, 0x0f, 0xcb, 0x81, 0x57, 0x07, 0xc5, 0x85, 0x3e, 0x5f, 0x87, 0xe2, 0x02, 0x1c, 0x35, 0x79, - 0x28, 0xc1, 0xd6, 0x7a, 0x5f, 0x01, 0xeb, 0x05, 0x7b, 0x1c, 0x7e, 0x2f, 0x55, 0xf5, 0x6e, 0x66, - 0xaa, 0xde, 0xb5, 0x02, 0xb3, 0x44, 0xe9, 0xeb, 0x83, 0x45, 0xa6, 0x3b, 0x74, 0x6b, 0x10, 0x40, - 0xc4, 0x09, 0xf6, 0x92, 0xcc, 0x77, 0x94, 0x04, 0xc6, 0xc7, 0xf0, 0x95, 0xf1, 0x49, 0x73, 0x31, - 0xd5, 0x87, 0xd2, 0x9c, 0xad, 0x5f, 0x96, 0x00, 0xd8, 0x22, 0x8e, 0x61, 0x8f, 0x4c, 0x62, 0x5d, - 0x84, 0x6a, 0xd9, 0x4a, 0xa9, 0x96, 0x96, 0x74, 0x21, 0x22, 0x7f, 0x0a, 0x65, 0xcb, 0x76, 0x46, - 0xb6, 0x7c, 0x63, 0x02, 0xcf, 0xe9, 0xba, 0xe5, 0xef, 0x65, 0xb0, 0x12, 0x83, 0x63, 0xe1, 0x72, - 0x2f, 0xb5, 0x84, 0x2f, 0x64, 0x96, 0x70, 0x5d, 0x62, 0xf2, 0xa9, 0x29, 0x97, 0x77, 0xc1, 0x12, - 0xd3, 0x15, 0xc1, 0xaa, 0x71, 0xd5, 0x32, 0x7b, 0x66, 0xd5, 0x12, 0x55, 0x9d, 0xed, 0x14, 0x13, - 0xca, 0x30, 0x17, 0xa8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4a, 0xfa, 0x83, 0x02, 0x96, 0xe2, 0x65, 0xba, - 0x00, 0x99, 0xd4, 0x4d, 0xcb, 0xa4, 0xc6, 0xe9, 0x79, 0x59, 0xa0, 0x93, 0xfe, 0x56, 0x49, 0x7a, - 0xcd, 0x85, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x51, 0x56, 0x2f, 0x05, 0x2f, - 0x53, 0x41, 0x1b, 0x8a, 0x7a, 0x53, 0x92, 0xaa, 0xf4, 0xe9, 0x4a, 0xaa, 0xf2, 0x27, 0x23, 0xa9, - 0xee, 0x83, 0x9a, 0x17, 0x8a, 0xa9, 0x0a, 0xa7, 0xbc, 0x31, 0x69, 0x3b, 0x0b, 0x1d, 0x15, 0xb1, - 0x46, 0x0a, 0x2a, 0x62, 0x92, 0x69, 0xa7, 0xea, 0x67, 0xa9, 0x9d, 0x58, 0x7a, 0x3b, 0xd8, 0xf7, - 0x88, 0xc6, 0xb7, 0x52, 0x2d, 0x4e, 0xef, 0x3d, 0xde, 0x8a, 0x44, 0x2f, 0x3c, 0x00, 0xeb, 0x8e, - 0x6b, 0x0f, 0x5c, 0xe2, 0x79, 0x5b, 0x04, 0x6b, 0x86, 0x6e, 0x91, 0x30, 0x80, 0xa0, 0xea, 0x5d, - 0x1b, 0x9f, 0x34, 0xd7, 0xf7, 0xe4, 0x10, 0x54, 0x64, 0xdb, 0xfa, 0x53, 0x05, 0x5c, 0xce, 0x9e, - 0x88, 0x05, 0x42, 0x44, 0x39, 0x97, 0x10, 0x79, 0x39, 0x91, 0xa2, 0x81, 0x4a, 0x4b, 0xbc, 0xf3, - 0xe7, 0xd2, 0x74, 0x13, 0x2c, 0x0b, 0xe1, 0x11, 0x76, 0x0a, 0x29, 0x16, 0x2d, 0xcf, 0x41, 0xba, - 0x1b, 0x65, 0xf1, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xb6, 0x0a, 0x09, 0x02, 0x7d, 0xf2, 0x15, 0x41, - 0xb0, 0x88, 0x92, 0x9d, 0x28, 0x8d, 0x65, 0xda, 0x24, 0x96, 0x1c, 0x21, 0x41, 0x25, 0xad, 0x4d, - 0x36, 0xb3, 0x00, 0x94, 0xb7, 0x81, 0x3d, 0xb0, 0xe2, 0x5b, 0x79, 0xaa, 0x20, 0xd7, 0xae, 0x09, - 0xaa, 0x95, 0x83, 0x3c, 0x04, 0xc9, 0xec, 0xe0, 0x8f, 0x53, 0x72, 0x65, 0x96, 0x9f, 0x22, 0x2f, - 0x9c, 0xbe, 0x1d, 0xa6, 0xd6, 0x2b, 0x12, 0x1d, 0x55, 0x9b, 0x56, 0x47, 0xb5, 0xde, 0x53, 0x00, - 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0x39, 0x8b, 0x44, 0x89, 0xd4, 0xe4, 0x0a, 0xe7, 0xe6, 0x64, - 0x85, 
0x13, 0x9f, 0xa0, 0xd3, 0x49, 0x1c, 0x31, 0xbd, 0x17, 0x73, 0x31, 0x33, 0x85, 0xc4, 0x89, - 0xfd, 0x79, 0x36, 0x89, 0x93, 0xe0, 0x39, 0x5d, 0xe2, 0xfc, 0xab, 0x04, 0x56, 0x62, 0xf0, 0xd4, - 0x12, 0x47, 0x62, 0xf2, 0xe5, 0xe5, 0xcc, 0x74, 0xb2, 0x23, 0x9e, 0xba, 0xff, 0x13, 0xd9, 0x11, - 0x3b, 0x54, 0x20, 0x3b, 0x7e, 0x57, 0x4a, 0x7a, 0x7d, 0x46, 0xd9, 0xf1, 0x09, 0x5c, 0x55, 0x7c, - 0xee, 0x94, 0x4b, 0xeb, 0xcf, 0x65, 0x70, 0x39, 0xbb, 0x05, 0x53, 0x75, 0x50, 0x99, 0x58, 0x07, - 0xf7, 0xc0, 0xea, 0x23, 0xdf, 0x30, 0x46, 0x3c, 0x86, 0x44, 0x31, 0x0c, 0x2a, 0xe8, 0x57, 0x85, - 0xe5, 0xea, 0xf7, 0x25, 0x18, 0x24, 0xb5, 0xcc, 0x97, 0xc5, 0xca, 0xb3, 0x96, 0xc5, 0xea, 0x39, - 0xca, 0xa2, 0x5c, 0x59, 0x94, 0xcf, 0xa5, 0x2c, 0xa6, 0xae, 0x89, 0x92, 0xe3, 0x6a, 0xe2, 0x3b, - 0xfc, 0x58, 0x01, 0x6b, 0xf2, 0xd7, 0x67, 0x68, 0x80, 0x25, 0x13, 0x3f, 0x4e, 0x5e, 0x5e, 0x4c, - 0x2a, 0x18, 0x3e, 0xd5, 0x0d, 0x35, 0xf8, 0xba, 0xa3, 0xbe, 0x65, 0xd1, 0x5d, 0x77, 0x9f, 0xba, - 0xba, 0x35, 0x08, 0x0a, 0x6c, 0x2f, 0xc5, 0x85, 0x32, 0xdc, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, - 0xbe, 0xef, 0x0e, 0xc2, 0x42, 0x78, 0xf6, 0x71, 0x78, 0xee, 0xf7, 0x04, 0x0b, 0x8a, 0xf8, 0x5a, - 0x1f, 0x29, 0x60, 0xbd, 0xa0, 0x82, 0x7e, 0x81, 0xa2, 0xdc, 0x05, 0xd7, 0x53, 0x41, 0xb2, 0x0d, - 0x49, 0x1e, 0xf9, 0x06, 0xdf, 0x9b, 0x42, 0xaf, 0xdc, 0x04, 0xf3, 0x0e, 0x76, 0xa9, 0x1e, 0x09, - 0xdd, 0x6a, 0x67, 0x71, 0x7c, 0xd2, 0x9c, 0xdf, 0x0b, 0x1b, 0x51, 0xdc, 0xdf, 0xfa, 0x55, 0x09, - 0x2c, 0x24, 0x48, 0x2e, 0x40, 0x3b, 0xbc, 0x91, 0xd2, 0x0e, 0xd2, 0xaf, 0x31, 0xc9, 0xa8, 0x8a, - 0xc4, 0x43, 0x2f, 0x23, 0x1e, 0xbe, 0x39, 0x89, 0xe8, 0x74, 0xf5, 0xf0, 0xef, 0x12, 0x58, 0x4d, - 0xa0, 0x63, 0xf9, 0xf0, 0x9d, 0x94, 0x7c, 0xd8, 0xc8, 0xc8, 0x87, 0xba, 0xcc, 0xe6, 0x4b, 0xfd, - 0x30, 0x59, 0x3f, 0xfc, 0x51, 0x01, 0xcb, 0x89, 0xb9, 0xbb, 0x00, 0x01, 0xb1, 0x95, 0x16, 0x10, - 0xcd, 0x09, 0xf9, 0x52, 0xa0, 0x20, 0x7e, 0x3d, 0x9b, 0xf2, 0xfb, 0x0b, 0x7f, 0x73, 0xf1, 0x73, - 0xb0, 0x3a, 0xb4, 0x0d, 0xdf, 0x24, 0x5d, 0x03, 0xeb, 0x66, 0x08, 0x60, 0x15, 0x97, 0x4d, 0xe2, - 0x8b, 0x52, 0x7a, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0x1f, 0xc4, 0x96, 0x71, 0x9d, 0x7f, 0x20, - 0xa1, 0x43, 0xd2, 0x41, 0xe0, 0xab, 0x60, 0x81, 0x55, 0x4a, 0xbd, 0x4f, 0x76, 0xb0, 0x19, 0xe6, - 0x54, 0xf4, 0xed, 0x61, 0x3f, 0xee, 0x42, 0x49, 0x1c, 0x3c, 0x02, 0x2b, 0x8e, 0xad, 0xf5, 0xb0, - 0x85, 0x07, 0x84, 0x9d, 0xff, 0x7b, 0xb6, 0xa1, 0xf7, 0x47, 0xfc, 0x4e, 0x63, 0xbe, 0xf3, 0x5a, - 0xf8, 0xbe, 0xba, 0x97, 0x87, 0xb0, 0xf7, 0x01, 0x49, 0x33, 0xdf, 0xcf, 0x32, 0x4a, 0x68, 0xe6, - 0x3e, 0x95, 0xcd, 0xe5, 0xfe, 0x5f, 0x20, 0x4b, 0xae, 0x73, 0x7e, 0x2c, 0x2b, 0xba, 0xad, 0xa9, - 0x9d, 0xeb, 0xb6, 0x46, 0xa2, 0x67, 0xe7, 0xcf, 0xa6, 0x67, 0x5b, 0xef, 0x55, 0xc1, 0x95, 0xdc, - 0x19, 0xfb, 0x19, 0x5e, 0xb9, 0xe4, 0x84, 0x61, 0xf9, 0x0c, 0xc2, 0x70, 0x13, 0x2c, 0x8b, 0xef, - 0x74, 0x19, 0x5d, 0x19, 0xcd, 0x47, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x95, 0x4f, 0xf5, 0x8c, - 0x57, 0x3e, 0x49, 0x2f, 0xc4, 0xdf, 0x4b, 0x82, 0xc4, 0xcd, 0x7b, 0x21, 0xfe, 0x65, 0x92, 0xc5, - 0xc3, 0xef, 0x86, 0x59, 0x19, 0x31, 0xcc, 0x71, 0x86, 0x4c, 0x9a, 0x45, 0x04, 0x19, 0xf4, 0x33, - 0x7d, 0x8b, 0x7a, 0x47, 0xf2, 0x2d, 0x6a, 0x63, 0xc2, 0x6e, 0x98, 0xfe, 0x76, 0x47, 0xaa, 0xdd, - 0x17, 0xce, 0xae, 0xdd, 0x5b, 0x7f, 0x55, 0xc0, 0x73, 0x85, 0xfb, 0x11, 0x6e, 0xa6, 0x6a, 0xfe, - 0xad, 0x4c, 0xcd, 0x7f, 0xbe, 0xd0, 0x30, 0x51, 0xf8, 0x4d, 0xf9, 0xc5, 0xcf, 0xdd, 0x89, 0x17, - 0x3f, 0x12, 0x45, 0x37, 0xf9, 0x06, 0xa8, 0xb3, 0xf1, 0xe4, 0x69, 0x63, 0xe6, 0x83, 0xa7, 0x8d, - 0x99, 0x0f, 0x9f, 0x36, 0x66, 
0x7e, 0x31, 0x6e, 0x28, 0x4f, 0xc6, 0x0d, 0xe5, 0x83, 0x71, 0x43, - 0xf9, 0x70, 0xdc, 0x50, 0xfe, 0x31, 0x6e, 0x28, 0xbf, 0xfd, 0xa8, 0x31, 0xf3, 0xb0, 0x34, 0xbc, - 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x69, 0x8a, 0x39, 0xfa, 0x26, 0x00, 0x00, + // 2149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1b, 0xc7, + 0x19, 0xd7, 0xf2, 0x21, 0x51, 0x23, 0x4b, 0xb2, 0x47, 0xaa, 0xc4, 0xd8, 0x0d, 0xe9, 0xb2, 0xae, + 0xa3, 0xc4, 0x31, 0x59, 0x3b, 0x4e, 0x50, 0xc4, 0x45, 0x02, 0x91, 0x4a, 0xd3, 0x34, 0x7a, 0x75, + 0x64, 0x39, 0x80, 0x9b, 0x16, 0x1d, 0x2d, 0xc7, 0xd4, 0x46, 0xfb, 0xc2, 0xee, 0x2c, 0x63, 0xa2, + 0x97, 0xa2, 0x40, 0x6f, 0x3d, 0xf4, 0x3f, 0x29, 0x8a, 0xa2, 0xb9, 0x15, 0x41, 0xd0, 0x8b, 0x2f, + 0x45, 0x83, 0x5e, 0x9a, 0x13, 0x51, 0x33, 0xa7, 0xa2, 0xe8, 0xad, 0xbd, 0xf8, 0xd2, 0x62, 0x66, + 0x67, 0xdf, 0xb3, 0x22, 0x25, 0x27, 0xca, 0x03, 0xbe, 0x89, 0x33, 0xbf, 0xef, 0x37, 0xdf, 0xcc, + 0x7c, 0xdf, 0x7c, 0xbf, 0x99, 0x15, 0xb8, 0x7d, 0xf4, 0x3d, 0xb7, 0xa9, 0x59, 0xad, 0x23, 0xef, + 0x80, 0x38, 0x26, 0xa1, 0xc4, 0x6d, 0xf5, 0x89, 0xd9, 0xb5, 0x9c, 0x96, 0xe8, 0xc0, 0xb6, 0xd6, + 0xc2, 0xb6, 0xed, 0xb6, 0xfa, 0x37, 0x5a, 0x3d, 0x62, 0x12, 0x07, 0x53, 0xd2, 0x6d, 0xda, 0x8e, + 0x45, 0x2d, 0x08, 0x7d, 0x4c, 0x13, 0xdb, 0x5a, 0x93, 0x61, 0x9a, 0xfd, 0x1b, 0x17, 0xaf, 0xf7, + 0x34, 0x7a, 0xe8, 0x1d, 0x34, 0x55, 0xcb, 0x68, 0xf5, 0xac, 0x9e, 0xd5, 0xe2, 0xd0, 0x03, 0xef, + 0x3e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x7c, 0x8a, 0x8b, 0x8d, 0xd8, 0x30, 0xaa, 0xe5, 0x10, 0xc9, + 0x30, 0x17, 0x6f, 0x45, 0x18, 0x03, 0xab, 0x87, 0x9a, 0x49, 0x9c, 0x41, 0xcb, 0x3e, 0xea, 0xb1, + 0x06, 0xb7, 0x65, 0x10, 0x8a, 0x65, 0x56, 0xad, 0x3c, 0x2b, 0xc7, 0x33, 0xa9, 0x66, 0x90, 0x8c, + 0xc1, 0x2b, 0xe3, 0x0c, 0x5c, 0xf5, 0x90, 0x18, 0x38, 0x63, 0xf7, 0x52, 0x9e, 0x9d, 0x47, 0x35, + 0xbd, 0xa5, 0x99, 0xd4, 0xa5, 0x4e, 0xda, 0xa8, 0xf1, 0x5f, 0x05, 0xc0, 0x8e, 0x65, 0x52, 0xc7, + 0xd2, 0x75, 0xe2, 0x20, 0xd2, 0xd7, 0x5c, 0xcd, 0x32, 0xe1, 0xcf, 0x41, 0x85, 0xcd, 0xa7, 0x8b, + 0x29, 0xae, 0x2a, 0x97, 0x95, 0xb5, 0xb9, 0x9b, 0xdf, 0x6d, 0x46, 0x8b, 0x1c, 0xd2, 0x37, 0xed, + 0xa3, 0x1e, 0x6b, 0x70, 0x9b, 0x0c, 0xdd, 0xec, 0xdf, 0x68, 0xee, 0x1c, 0xbc, 0x47, 0x54, 0xba, + 0x45, 0x28, 0x6e, 0xc3, 0x87, 0xc3, 0xfa, 0xd4, 0x68, 0x58, 0x07, 0x51, 0x1b, 0x0a, 0x59, 0xe1, + 0x0e, 0x28, 0x71, 0xf6, 0x02, 0x67, 0xbf, 0x9e, 0xcb, 0x2e, 0x26, 0xdd, 0x44, 0xf8, 0xfd, 0x37, + 0x1e, 0x50, 0x62, 0x32, 0xf7, 0xda, 0xe7, 0x04, 0x75, 0x69, 0x03, 0x53, 0x8c, 0x38, 0x11, 0x7c, + 0x11, 0x54, 0x1c, 0xe1, 0x7e, 0xb5, 0x78, 0x59, 0x59, 0x2b, 0xb6, 0xcf, 0x0b, 0x54, 0x25, 0x98, + 0x16, 0x0a, 0x11, 0x8d, 0x3f, 0x2b, 0x60, 0x25, 0x3b, 0xef, 0x4d, 0xcd, 0xa5, 0xf0, 0xdd, 0xcc, + 0xdc, 0x9b, 0x93, 0xcd, 0x9d, 0x59, 0xf3, 0x99, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0x79, 0xbf, 0x0d, + 0xca, 0x1a, 0x25, 0x86, 0x5b, 0x2d, 0x5c, 0x2e, 0xae, 0xcd, 0xdd, 0xbc, 0xda, 0xcc, 0xc6, 0x6e, + 0x33, 0xeb, 0x58, 0x7b, 0x5e, 0x50, 0x96, 0xdf, 0x62, 0xc6, 0xc8, 0xe7, 0x68, 0xfc, 0x4f, 0x01, + 0xb3, 0x1b, 0x98, 0x18, 0x96, 0xb9, 0x47, 0xe8, 0x19, 0x6c, 0x5a, 0x07, 0x94, 0x5c, 0x9b, 0xa8, + 0x62, 0xd3, 0xbe, 0x25, 0xf3, 0x3d, 0x74, 0x67, 0xcf, 0x26, 0x6a, 0xb4, 0x51, 0xec, 0x17, 0xe2, + 0xc6, 0xf0, 0x6d, 0x30, 0xed, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd3, 0xdc, 0xcd, 0x6f, 0x1f, 0x4f, + 0xc3, 0xa1, 0xed, 0x05, 0x41, 0x34, 0xed, 0xff, 0x46, 0x82, 0xa2, 0xf1, 0xcf, 0x02, 0x80, 0x21, + 0xb6, 0x63, 0x99, 0x5d, 0x8d, 0xb2, 0xf8, 0x7d, 0x15, 0x94, 0xe8, 0xc0, 0x26, 0x7c, 0x19, 0x66, + 0xdb, 0x57, 
0x03, 0x2f, 0xee, 0x0c, 0x6c, 0xf2, 0x78, 0x58, 0x5f, 0xc9, 0x5a, 0xb0, 0x1e, 0xc4, + 0x6d, 0xe0, 0x66, 0xe8, 0x5f, 0x81, 0x5b, 0xdf, 0x4a, 0x0e, 0xfd, 0x78, 0x58, 0x97, 0x1c, 0x16, + 0xcd, 0x90, 0x29, 0xe9, 0x20, 0xec, 0x03, 0xa8, 0x63, 0x97, 0xde, 0x71, 0xb0, 0xe9, 0xfa, 0x23, + 0x69, 0x06, 0x11, 0x33, 0x7f, 0x61, 0xb2, 0xed, 0x61, 0x16, 0xed, 0x8b, 0xc2, 0x0b, 0xb8, 0x99, + 0x61, 0x43, 0x92, 0x11, 0xe0, 0x55, 0x30, 0xed, 0x10, 0xec, 0x5a, 0x66, 0xb5, 0xc4, 0x67, 0x11, + 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0xe7, 0xc1, 0x8c, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xb5, + 0xcc, 0x81, 0x8b, 0x02, 0x38, 0xb3, 0xe5, 0x37, 0xa3, 0xa0, 0xbf, 0xf1, 0x7b, 0x05, 0xcc, 0x87, + 0x2b, 0x77, 0x06, 0xa9, 0xd2, 0x4e, 0xa6, 0xca, 0xb3, 0xc7, 0xc6, 0x49, 0x4e, 0x86, 0x7c, 0x58, + 0x8c, 0xf9, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x54, 0x5c, 0xa2, 0x13, 0x95, 0x5a, 0x8e, 0xf0, 0xf9, + 0xa5, 0x09, 0x7d, 0xc6, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0x73, 0xcc, 0xe9, 0xe0, 0x17, 0x0a, + 0x29, 0xe1, 0x8f, 0x41, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x22, 0x4d, 0x12, 0xf1, 0xcd, 0xc2, + 0x85, 0x91, 0xed, 0x5a, 0xdd, 0x3b, 0x02, 0xc6, 0x13, 0x25, 0x5c, 0x87, 0xa0, 0x15, 0x85, 0x34, + 0xf0, 0x08, 0x2c, 0x78, 0x76, 0x97, 0x21, 0x29, 0x3b, 0xba, 0x7b, 0x03, 0x11, 0x3e, 0xd7, 0x8e, + 0x5d, 0x90, 0xfd, 0x84, 0x49, 0x7b, 0x45, 0x0c, 0xb0, 0x90, 0x6c, 0x47, 0x29, 0x6a, 0xb8, 0x0e, + 0x16, 0x0d, 0xcd, 0x44, 0x04, 0x77, 0x07, 0x7b, 0x44, 0xb5, 0xcc, 0xae, 0xcb, 0x03, 0xa8, 0xdc, + 0x5e, 0x15, 0x04, 0x8b, 0x5b, 0xc9, 0x6e, 0x94, 0xc6, 0xc3, 0x4d, 0xb0, 0x1c, 0x9c, 0xb3, 0x3f, + 0xd4, 0x5c, 0x6a, 0x39, 0x83, 0x4d, 0xcd, 0xd0, 0x68, 0x75, 0x9a, 0xf3, 0x54, 0x47, 0xc3, 0xfa, + 0x32, 0x92, 0xf4, 0x23, 0xa9, 0x55, 0xe3, 0x37, 0xd3, 0x60, 0x31, 0x75, 0x1a, 0xc0, 0xbb, 0x60, + 0x45, 0xf5, 0x1c, 0x87, 0x98, 0x74, 0xdb, 0x33, 0x0e, 0x88, 0xb3, 0xa7, 0x1e, 0x92, 0xae, 0xa7, + 0x93, 0x2e, 0xdf, 0xd1, 0x72, 0xbb, 0x26, 0x7c, 0x5d, 0xe9, 0x48, 0x51, 0x28, 0xc7, 0x1a, 0xfe, + 0x08, 0x40, 0x93, 0x37, 0x6d, 0x69, 0xae, 0x1b, 0x72, 0x16, 0x38, 0x67, 0x98, 0x80, 0xdb, 0x19, + 0x04, 0x92, 0x58, 0x31, 0x1f, 0xbb, 0xc4, 0xd5, 0x1c, 0xd2, 0x4d, 0xfb, 0x58, 0x4c, 0xfa, 0xb8, + 0x21, 0x45, 0xa1, 0x1c, 0x6b, 0xf8, 0x32, 0x98, 0xf3, 0x47, 0xe3, 0x6b, 0x2e, 0x36, 0x67, 0x49, + 0x90, 0xcd, 0x6d, 0x47, 0x5d, 0x28, 0x8e, 0x63, 0x53, 0xb3, 0x0e, 0x5c, 0xe2, 0xf4, 0x49, 0xf7, + 0x4d, 0x5f, 0x03, 0xb0, 0x42, 0x59, 0xe6, 0x85, 0x32, 0x9c, 0xda, 0x4e, 0x06, 0x81, 0x24, 0x56, + 0x6c, 0x6a, 0x7e, 0xd4, 0x64, 0xa6, 0x36, 0x9d, 0x9c, 0xda, 0xbe, 0x14, 0x85, 0x72, 0xac, 0x59, + 0xec, 0xf9, 0x2e, 0xaf, 0xf7, 0xb1, 0xa6, 0xe3, 0x03, 0x9d, 0x54, 0x67, 0x92, 0xb1, 0xb7, 0x9d, + 0xec, 0x46, 0x69, 0x3c, 0x7c, 0x13, 0x5c, 0xf0, 0x9b, 0xf6, 0x4d, 0x1c, 0x92, 0x54, 0x38, 0xc9, + 0x33, 0x82, 0xe4, 0xc2, 0x76, 0x1a, 0x80, 0xb2, 0x36, 0xf0, 0x55, 0xb0, 0xa0, 0x5a, 0xba, 0xce, + 0xe3, 0xb1, 0x63, 0x79, 0x26, 0xad, 0xce, 0x72, 0x16, 0xc8, 0x72, 0xa8, 0x93, 0xe8, 0x41, 0x29, + 0x24, 0xbc, 0x07, 0x80, 0x1a, 0x94, 0x03, 0xb7, 0x0a, 0xf2, 0x0b, 0x7d, 0xb6, 0x0e, 0x45, 0x05, + 0x38, 0x6c, 0x72, 0x51, 0x8c, 0xad, 0xf1, 0xa1, 0x02, 0x56, 0x73, 0x72, 0x1c, 0xbe, 0x9e, 0xa8, + 0x7a, 0xd7, 0x52, 0x55, 0xef, 0x52, 0x8e, 0x59, 0xac, 0xf4, 0xa9, 0x60, 0x9e, 0xe9, 0x0e, 0xcd, + 0xec, 0xf9, 0x10, 0x71, 0x82, 0xbd, 0x20, 0xf3, 0x1d, 0xc5, 0x81, 0xd1, 0x31, 0x7c, 0x61, 0x34, + 0xac, 0xcf, 0x27, 0xfa, 0x50, 0x92, 0xb3, 0xf1, 0xab, 0x02, 0x00, 0x1b, 0xc4, 0xd6, 0xad, 0x81, + 0x41, 0xcc, 0xb3, 0x50, 0x2d, 0x1b, 0x09, 0xd5, 0xd2, 0x90, 0x6e, 0x44, 0xe8, 0x4f, 0xae, 0x6c, + 0xd9, 0x4c, 0xc9, 0x96, 0x2b, 0x63, 
0x78, 0x8e, 0xd7, 0x2d, 0x7f, 0x2f, 0x82, 0xa5, 0x08, 0x1c, + 0x09, 0x97, 0xdb, 0x89, 0x2d, 0x7c, 0x2e, 0xb5, 0x85, 0xab, 0x12, 0x93, 0xcf, 0x4d, 0xb9, 0xbc, + 0x07, 0x16, 0x98, 0xae, 0xf0, 0x77, 0x8d, 0xab, 0x96, 0xe9, 0x13, 0xab, 0x96, 0xb0, 0xea, 0x6c, + 0x26, 0x98, 0x50, 0x8a, 0x39, 0x47, 0x25, 0xcd, 0x7c, 0x15, 0x55, 0xd2, 0x1f, 0x14, 0xb0, 0x10, + 0x6d, 0xd3, 0x19, 0xc8, 0xa4, 0x4e, 0x52, 0x26, 0xd5, 0x8e, 0x8f, 0xcb, 0x1c, 0x9d, 0xf4, 0xb7, + 0x52, 0xdc, 0x6b, 0x2e, 0x94, 0xd6, 0xd8, 0x85, 0xca, 0xd6, 0x35, 0x15, 0xbb, 0xa2, 0xac, 0x9e, + 0xf3, 0x2f, 0x53, 0x7e, 0x1b, 0x0a, 0x7b, 0x13, 0x92, 0xaa, 0xf0, 0xf9, 0x4a, 0xaa, 0xe2, 0x67, + 0x23, 0xa9, 0xee, 0x80, 0x8a, 0x1b, 0x88, 0xa9, 0x12, 0xa7, 0xbc, 0x3a, 0x2e, 0x9d, 0x85, 0x8e, + 0x0a, 0x59, 0x43, 0x05, 0x15, 0x32, 0xc9, 0xb4, 0x53, 0xf9, 0x8b, 0xd4, 0x4e, 0x2c, 0xbc, 0x6d, + 0xec, 0xb9, 0xa4, 0xcb, 0x53, 0xa9, 0x12, 0x85, 0xf7, 0x2e, 0x6f, 0x45, 0xa2, 0x17, 0xee, 0x83, + 0x55, 0xdb, 0xb1, 0x7a, 0x0e, 0x71, 0xdd, 0x0d, 0x82, 0xbb, 0xba, 0x66, 0x92, 0x60, 0x02, 0x7e, + 0xd5, 0xbb, 0x34, 0x1a, 0xd6, 0x57, 0x77, 0xe5, 0x10, 0x94, 0x67, 0xdb, 0xf8, 0x53, 0x09, 0x9c, + 0x4f, 0x9f, 0x88, 0x39, 0x42, 0x44, 0x39, 0x95, 0x10, 0x79, 0x31, 0x16, 0xa2, 0xbe, 0x4a, 0x8b, + 0xdd, 0xf9, 0x33, 0x61, 0xba, 0x0e, 0x16, 0x85, 0xf0, 0x08, 0x3a, 0x85, 0x14, 0x0b, 0xb7, 0x67, + 0x3f, 0xd9, 0x8d, 0xd2, 0x78, 0x78, 0x1b, 0xcc, 0x3b, 0x5c, 0x5b, 0x05, 0x04, 0xbe, 0x3e, 0xf9, + 0x86, 0x20, 0x98, 0x47, 0xf1, 0x4e, 0x94, 0xc4, 0x32, 0x6d, 0x12, 0x49, 0x8e, 0x80, 0xa0, 0x94, + 0xd4, 0x26, 0xeb, 0x69, 0x00, 0xca, 0xda, 0xc0, 0x2d, 0xb0, 0xe4, 0x99, 0x59, 0x2a, 0x3f, 0xd6, + 0x2e, 0x09, 0xaa, 0xa5, 0xfd, 0x2c, 0x04, 0xc9, 0xec, 0xe0, 0x4f, 0x12, 0x72, 0x65, 0x9a, 0x9f, + 0x22, 0xcf, 0x1d, 0x9f, 0x0e, 0x13, 0xeb, 0x15, 0x89, 0x8e, 0xaa, 0x4c, 0xaa, 0xa3, 0x1a, 0x1f, + 0x28, 0x00, 0x66, 0x53, 0x70, 0xec, 0xe5, 0x3e, 0x63, 0x11, 0x2b, 0x91, 0x5d, 0xb9, 0xc2, 0xb9, + 0x36, 0x5e, 0xe1, 0x44, 0x27, 0xe8, 0x64, 0x12, 0x47, 0x2c, 0xef, 0xd9, 0x3c, 0xcc, 0x4c, 0x20, + 0x71, 0x22, 0x7f, 0x9e, 0x4c, 0xe2, 0xc4, 0x78, 0x8e, 0x97, 0x38, 0xff, 0x2a, 0x80, 0xa5, 0x08, + 0x3c, 0xb1, 0xc4, 0x91, 0x98, 0x3c, 0x7d, 0x9c, 0x99, 0x4c, 0x76, 0x44, 0x4b, 0xf7, 0x25, 0x91, + 0x1d, 0x91, 0x43, 0x39, 0xb2, 0xe3, 0x77, 0x85, 0xb8, 0xd7, 0x27, 0x94, 0x1d, 0x9f, 0xc1, 0x53, + 0xc5, 0x57, 0x4e, 0xb9, 0x34, 0x3e, 0x2a, 0x82, 0xf3, 0xe9, 0x14, 0x4c, 0xd4, 0x41, 0x65, 0x6c, + 0x1d, 0xdc, 0x05, 0xcb, 0xf7, 0x3d, 0x5d, 0x1f, 0xf0, 0x39, 0xc4, 0x8a, 0xa1, 0x5f, 0x41, 0xbf, + 0x29, 0x2c, 0x97, 0x7f, 0x20, 0xc1, 0x20, 0xa9, 0x65, 0xb6, 0x2c, 0x96, 0x9e, 0xb4, 0x2c, 0x96, + 0x4f, 0x51, 0x16, 0xe5, 0xca, 0xa2, 0x78, 0x2a, 0x65, 0x31, 0x71, 0x4d, 0x94, 0x1c, 0x57, 0x63, + 0xef, 0xf0, 0x23, 0x05, 0xac, 0xc8, 0xaf, 0xcf, 0x50, 0x07, 0x0b, 0x06, 0x7e, 0x10, 0x7f, 0xbc, + 0x18, 0x57, 0x30, 0x3c, 0xaa, 0xe9, 0x4d, 0xff, 0xeb, 0x4e, 0xf3, 0x2d, 0x93, 0xee, 0x38, 0x7b, + 0xd4, 0xd1, 0xcc, 0x9e, 0x5f, 0x60, 0xb7, 0x12, 0x5c, 0x28, 0xc5, 0x0d, 0xef, 0x81, 0x8a, 0x81, + 0x1f, 0xec, 0x79, 0x4e, 0x2f, 0x28, 0x84, 0x27, 0x1f, 0x87, 0xc7, 0xfe, 0x96, 0x60, 0x41, 0x21, + 0x5f, 0xe3, 0x53, 0x05, 0xac, 0xe6, 0x54, 0xd0, 0xaf, 0xd1, 0x2c, 0x77, 0xc0, 0xe5, 0xc4, 0x24, + 0x59, 0x42, 0x92, 0xfb, 0x9e, 0xce, 0x73, 0x53, 0xe8, 0x95, 0x6b, 0x60, 0xd6, 0xc6, 0x0e, 0xd5, + 0x42, 0xa1, 0x5b, 0x6e, 0xcf, 0x8f, 0x86, 0xf5, 0xd9, 0xdd, 0xa0, 0x11, 0x45, 0xfd, 0x8d, 0x5f, + 0x17, 0xc0, 0x5c, 0x8c, 0xe4, 0x0c, 0xb4, 0xc3, 0x1b, 0x09, 0xed, 0x20, 0xfd, 0x1a, 0x13, 0x9f, + 0x55, 0x9e, 0x78, 0xd8, 0x4a, 0x89, 0x87, 0xef, 0x8c, 0x23, 
0x3a, 0x5e, 0x3d, 0xfc, 0xbb, 0x00, + 0x96, 0x63, 0xe8, 0x48, 0x3e, 0x7c, 0x3f, 0x21, 0x1f, 0xd6, 0x52, 0xf2, 0xa1, 0x2a, 0xb3, 0x79, + 0xaa, 0x1f, 0xc6, 0xeb, 0x87, 0x3f, 0x2a, 0x60, 0x31, 0xb6, 0x76, 0x67, 0x20, 0x20, 0x36, 0x92, + 0x02, 0xa2, 0x3e, 0x26, 0x5e, 0x72, 0x14, 0xc4, 0x7f, 0x14, 0xd0, 0x8a, 0xa1, 0x76, 0x89, 0xe3, + 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x6b, 0xe9, 0x9e, 0x41, 0x3a, 0x3a, 0xd6, 0x0c, 0x44, 0x58, 0x83, + 0x66, 0x99, 0xbb, 0x96, 0xae, 0xa9, 0x03, 0x88, 0xc1, 0xdc, 0xfb, 0x87, 0xc4, 0xdc, 0x20, 0x3a, + 0xa1, 0xe2, 0x9b, 0xc1, 0x6c, 0xfb, 0xf5, 0xe0, 0x09, 0xfd, 0x9d, 0xa8, 0xeb, 0xf1, 0xb0, 0xbe, + 0x36, 0x09, 0x23, 0x0f, 0xb0, 0x38, 0x27, 0xfc, 0x19, 0x00, 0xec, 0xe7, 0x9e, 0x8a, 0x83, 0x2f, + 0x08, 0xb3, 0xed, 0xd7, 0x82, 0x34, 0x7c, 0x27, 0xec, 0x39, 0xd1, 0x00, 0x31, 0xc6, 0xc6, 0x5f, + 0x67, 0x12, 0xdb, 0xf5, 0xb5, 0x7f, 0xb0, 0xf9, 0x05, 0x58, 0xee, 0x47, 0xab, 0x13, 0x00, 0x98, + 0xd0, 0x60, 0xb1, 0xf3, 0xbc, 0x94, 0x5e, 0xb6, 0xae, 0x91, 0xbc, 0xb9, 0x2b, 0xa1, 0x43, 0xd2, + 0x41, 0xe0, 0xcb, 0x60, 0x8e, 0x09, 0x04, 0x4d, 0x25, 0xdb, 0xd8, 0x08, 0x52, 0x29, 0xfc, 0xe4, + 0xb2, 0x17, 0x75, 0xa1, 0x38, 0x0e, 0x1e, 0x82, 0x25, 0xdb, 0xea, 0x6e, 0x61, 0x13, 0xf7, 0x08, + 0x2b, 0x7b, 0xfe, 0x56, 0xf2, 0xa7, 0x9c, 0xd9, 0xf6, 0x2b, 0xc1, 0x35, 0x7d, 0x37, 0x0b, 0x61, + 0xd7, 0x20, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x25, 0x34, 0x32, 0x5f, 0x08, 0x67, 0x32, 0xff, 0x56, + 0x21, 0xcb, 0xa9, 0x53, 0x7e, 0x23, 0xcc, 0x7b, 0xa4, 0xaa, 0x9c, 0xea, 0x91, 0x4a, 0x22, 0xe3, + 0x67, 0x4f, 0x28, 0xe3, 0x3f, 0x52, 0xc0, 0x15, 0x7b, 0x82, 0x34, 0xaa, 0x02, 0xbe, 0x2c, 0x9d, + 0x31, 0xcb, 0x32, 0x49, 0x46, 0xb6, 0xd7, 0x46, 0xc3, 0xfa, 0x95, 0x49, 0x90, 0x68, 0x22, 0xd7, + 0x1a, 0x1f, 0x94, 0xc1, 0x85, 0x4c, 0x79, 0xfc, 0x02, 0x5f, 0xcb, 0x32, 0x9a, 0xbe, 0x78, 0x02, + 0x4d, 0xbf, 0x0e, 0x16, 0xc5, 0x27, 0xd6, 0xd4, 0x95, 0x20, 0xdc, 0xd3, 0x4e, 0xb2, 0x1b, 0xa5, + 0xf1, 0xb2, 0xd7, 0xba, 0xf2, 0x09, 0x5f, 0xeb, 0xe2, 0x5e, 0x88, 0xff, 0x0c, 0xf2, 0x93, 0x2f, + 0xeb, 0x85, 0xf8, 0x07, 0xa1, 0x34, 0x1e, 0xbe, 0x16, 0x64, 0x56, 0xc8, 0x30, 0xc3, 0x19, 0x52, + 0xa9, 0x12, 0x12, 0xa4, 0xd0, 0x4f, 0xf4, 0x19, 0xf1, 0x5d, 0xc9, 0x67, 0xc4, 0xb5, 0x31, 0xa1, + 0x3b, 0xf9, 0xc3, 0x9c, 0xf4, 0xda, 0x35, 0x77, 0xf2, 0x6b, 0x57, 0xe3, 0x2f, 0x0a, 0x78, 0x26, + 0xf7, 0x4c, 0x81, 0xeb, 0x09, 0xb9, 0x76, 0x3d, 0x25, 0xd7, 0x9e, 0xcd, 0x35, 0x8c, 0x69, 0x36, + 0x43, 0xfe, 0x66, 0x77, 0x6b, 0xec, 0x9b, 0x9d, 0x44, 0x8c, 0x8f, 0x7f, 0xbc, 0x6b, 0xaf, 0x3d, + 0x7c, 0x54, 0x9b, 0xfa, 0xf8, 0x51, 0x6d, 0xea, 0x93, 0x47, 0xb5, 0xa9, 0x5f, 0x8e, 0x6a, 0xca, + 0xc3, 0x51, 0x4d, 0xf9, 0x78, 0x54, 0x53, 0x3e, 0x19, 0xd5, 0x94, 0x7f, 0x8c, 0x6a, 0xca, 0x6f, + 0x3f, 0xad, 0x4d, 0xdd, 0x2b, 0xf4, 0x6f, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x7f, 0x6b, + 0xc0, 0xb5, 0x28, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2291,6 +2329,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2311,6 +2382,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) i-- dAtA[i] = 0x48 @@ -2955,6 +3038,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -2986,6 +3082,10 @@ func (m *StatefulSetSpec) Size() (n int) { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3399,6 +3499,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -3418,6 +3529,7 @@ func (this *StatefulSetSpec) String() string { `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -7461,6 +7573,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum 
<= 0 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7749,6 +7975,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6e593a379..6e5517d81 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -165,8 +165,8 @@ message DaemonSetStatus { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 desiredNumberScheduled = 3; - // The number of nodes that should be running the 
daemon pod and have one - // or more of the daemon pod running and ready. + // numberReady is the number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition. optional int32 numberReady = 4; // The most recent generation observed by the daemon set controller. @@ -322,7 +322,7 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -457,7 +457,7 @@ message ReplicaSetStatus { // +optional optional int32 fullyLabeledReplicas = 2; - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition. // +optional optional int32 readyReplicas = 4; @@ -611,6 +611,23 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. @@ -677,6 +694,15 @@ message StatefulSetSpec { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; + + // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent + // volume claims created from volumeClaimTemplates. By default, all persistent + // volume claims are created as needed and retained until manually deleted. This + // policy allows the lifecycle to be altered, for example by deleting persistent + // volume claims when their stateful set is deleted, or when their pod is scaled + // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, + // which is alpha. +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -689,7 +715,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. 
optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -721,9 +747,7 @@ message StatefulSetStatus { repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 51c9fa43d..469b47297 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -61,6 +61,7 @@ type StatefulSet struct { } // PodManagementPolicyType defines the policy for creating pods under a stateful set. +// +enum type PodManagementPolicyType string const ( @@ -90,6 +91,7 @@ type StatefulSetUpdateStrategy struct { // StatefulSetUpdateStrategyType is a string enumeration type that enumerates // all possible update strategies for the StatefulSet controller. +// +enum type StatefulSetUpdateStrategyType string const ( @@ -116,6 +118,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. 
+ WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -182,6 +218,15 @@ type StatefulSetSpec struct { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent + // volume claims created from volumeClaimTemplates. By default, all persistent + // volume claims are created as needed and retained until manually deleted. This + // policy allows the lifecycle to be altered, for example by deleting persistent + // volume claims when their stateful set is deleted, or when their pod is scaled + // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, + // which is alpha. +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -194,7 +239,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -226,10 +271,8 @@ type StatefulSetStatus struct { Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"` + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -354,6 +397,7 @@ type DeploymentStrategy struct { RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } +// +enum type DeploymentStrategyType string const ( @@ -408,7 +452,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition. 
// +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` @@ -495,6 +539,7 @@ type DaemonSetUpdateStrategy struct { RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } +// +enum type DaemonSetUpdateStrategyType string const ( @@ -597,8 +642,8 @@ type DaemonSetStatus struct { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // numberReady is the number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition. NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` // The most recent generation observed by the daemon set controller. @@ -785,7 +830,7 @@ type ReplicaSetStatus struct { // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 39c733adb..f640f9cdd 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -100,7 +100,7 @@ var map_DaemonSetStatus = map[string]string{ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "numberReady": "numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition.", "observedGeneration": "The most recent generation observed by the daemon set controller.", "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", @@ -179,7 +179,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -250,7 +250,7 @@ var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", + "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", @@ -323,17 +323,28 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. 
The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. 
These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. 
+optional", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -344,14 +355,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created for this StatefulSet with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 0c8054852..8e4d4261a 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -687,6 +688,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -714,6 +731,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 79e39e582..74584223c 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -553,10 +553,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} + return fileDescriptor_2a07313e8f66e805, []int{19} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +614,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} + return fileDescriptor_2a07313e8f66e805, []int{20} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +642,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{21} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -658,6 +688,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") 
proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") @@ -668,124 +699,130 @@ func init() { } var fileDescriptor_2a07313e8f66e805 = []byte{ - // 1869 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x3d, 0xf6, 0xcc, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, - 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x4a, 0xce, 0xfc, 0x15, 0xdc, 0x40, 0x70, 0xe3, 0xb4, 0xc7, - 0x88, 0x0b, 0x39, 0x59, 0xec, 0xe4, 0x0a, 0x47, 0x2e, 0x2b, 0x21, 0xa1, 0xaa, 0xae, 0xfe, 0xee, - 0xb6, 0xdb, 0x48, 0x6b, 0x09, 0x6e, 0xd3, 0xf5, 0xde, 0xfb, 0xbd, 0x57, 0x55, 0xef, 0xbd, 0x7a, - 0xbf, 0x81, 0x1f, 0x9d, 0xbd, 0xe7, 0xa9, 0xba, 0xdd, 0x3b, 0xf3, 0x07, 0xd4, 0xb5, 0x28, 0xa3, - 0x5e, 0x6f, 0x4c, 0xad, 0xa1, 0xed, 0xf6, 0xa4, 0x80, 0x38, 0x7a, 0x8f, 0x38, 0x8e, 0xd7, 0x1b, - 0xdf, 0x1e, 0x50, 0x46, 0x6e, 0xf7, 0x46, 0xd4, 0xa2, 0x2e, 0x61, 0x74, 0xa8, 0x3a, 0xae, 0xcd, - 0x6c, 0xb4, 0x16, 0x28, 0xaa, 0xc4, 0xd1, 0x55, 0xae, 0xa8, 0x4a, 0xc5, 0xf5, 0xed, 0x91, 0xce, - 0x1e, 0xf9, 0x03, 0x55, 0xb3, 0xcd, 0xde, 0xc8, 0x1e, 0xd9, 0x3d, 0xa1, 0x3f, 0xf0, 0x1f, 0x8a, - 0x2f, 0xf1, 0x21, 0x7e, 0x05, 0x38, 0xeb, 0xdd, 0x84, 0x43, 0xcd, 0x76, 0x69, 0x6f, 0x9c, 0xf3, - 0xb5, 0xfe, 0x4e, 0xac, 0x63, 0x12, 0xed, 0x91, 0x6e, 0x51, 0x77, 0xd2, 0x73, 0xce, 0x46, 0x7c, - 0xc1, 0xeb, 0x99, 0x94, 0x91, 0x22, 0xab, 0x5e, 0x99, 0x95, 0xeb, 0x5b, 0x4c, 0x37, 0x69, 0xce, - 0xe0, 0xdd, 0xcb, 0x0c, 0x3c, 0xed, 0x11, 0x35, 0x49, 0xce, 0xee, 0xed, 0x32, 0x3b, 0x9f, 0xe9, - 0x46, 0x4f, 0xb7, 0x98, 0xc7, 0xdc, 0xac, 0x51, 0xf7, 0x5f, 0x0a, 0xa0, 0x3d, 0xdb, 0x62, 0xae, - 0x6d, 0x18, 0xd4, 0xc5, 0x74, 0xac, 0x7b, 0xba, 0x6d, 0xa1, 0xcf, 0xa1, 0xc1, 0xf7, 0x33, 0x24, - 0x8c, 0xb4, 0x95, 0x4d, 0x65, 0x6b, 0x71, 0xe7, 0x2d, 0x35, 0x3e, 0xe9, 0x08, 0x5e, 0x75, 0xce, - 0x46, 0x7c, 0xc1, 0x53, 0xb9, 0xb6, 0x3a, 0xbe, 0xad, 0x1e, 0x0e, 0xbe, 0xa0, 0x1a, 0x3b, 0xa0, - 0x8c, 0xf4, 0xd1, 0x93, 0xf3, 0x8d, 0x99, 0xe9, 0xf9, 0x06, 0xc4, 0x6b, 0x38, 0x42, 0x45, 0x87, - 0x30, 0x27, 0xd0, 0x6b, 0x02, 0x7d, 0xbb, 0x14, 0x5d, 0x6e, 0x5a, 0xc5, 0xe4, 0x57, 0xf7, 0x1f, - 0x33, 0x6a, 0xf1, 0xf0, 0xfa, 0x2f, 0x48, 0xe8, 0xb9, 0x7b, 0x84, 0x11, 0x2c, 0x80, 0xd0, 0x9b, - 0xd0, 0x70, 0x65, 0xf8, 0xed, 0xd9, 0x4d, 0x65, 0x6b, 0xb6, 0x7f, 0x43, 0x6a, 0x35, 0xc2, 0x6d, - 0xe1, 0x48, 0xa3, 0xfb, 0x44, 0x81, 0x5b, 0xf9, 0x7d, 0xef, 0xeb, 0x1e, 0x43, 0xbf, 0xc8, 0xed, - 0x5d, 0xad, 0xb6, 0x77, 0x6e, 0x2d, 0x76, 0x1e, 0x39, 0x0e, 0x57, 0x12, 0xfb, 0x3e, 0x82, 0xba, - 0xce, 0xa8, 0xe9, 0xb5, 0x6b, 0x9b, 0xb3, 0x5b, 0x8b, 0x3b, 0x6f, 0xa8, 0x25, 0x09, 0xac, 0xe6, - 0xa3, 0xeb, 0x2f, 0x49, 0xdc, 0xfa, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0x6f, 0x6b, 0x00, 0xf7, - 0xa8, 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0xae, 0xee, 0x01, 0xcc, 0x79, 0x0e, 0xd5, 0xe4, - 0xd5, 0xbd, 0x5a, 0xba, 0x83, 0x38, 0xa8, 0x63, 
0x87, 0x6a, 0xf1, 0xa5, 0xf1, 0x2f, 0x2c, 0x20, - 0xd0, 0x27, 0x30, 0xef, 0x31, 0xc2, 0x7c, 0x4f, 0x5c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, - 0x41, 0xbf, 0x25, 0xe1, 0xe6, 0x83, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9b, 0x85, 0x95, 0x58, 0x79, - 0xcf, 0xb6, 0x86, 0x3a, 0xe3, 0x29, 0x7d, 0x17, 0xe6, 0xd8, 0xc4, 0xa1, 0xe2, 0x4c, 0x9a, 0xfd, - 0x57, 0xc3, 0x60, 0x4e, 0x26, 0x0e, 0x7d, 0x76, 0xbe, 0xb1, 0x56, 0x60, 0xc2, 0x45, 0x58, 0x18, - 0xa1, 0xfd, 0x28, 0xce, 0x9a, 0x30, 0x7f, 0x27, 0xed, 0xfc, 0xd9, 0xf9, 0x46, 0x41, 0x03, 0x51, - 0x23, 0xa4, 0x74, 0x88, 0xe8, 0x0b, 0x68, 0x19, 0xc4, 0x63, 0xa7, 0xce, 0x90, 0x30, 0x7a, 0xa2, - 0x9b, 0xb4, 0x3d, 0x2f, 0x76, 0xff, 0x7a, 0xb5, 0x8b, 0xe2, 0x16, 0xfd, 0x5b, 0x32, 0x82, 0xd6, - 0x7e, 0x0a, 0x09, 0x67, 0x90, 0xd1, 0x18, 0x10, 0x5f, 0x39, 0x71, 0x89, 0xe5, 0x05, 0xbb, 0xe2, - 0xfe, 0x16, 0xae, 0xec, 0x6f, 0x5d, 0xfa, 0x43, 0xfb, 0x39, 0x34, 0x5c, 0xe0, 0x01, 0xbd, 0x02, - 0xf3, 0x2e, 0x25, 0x9e, 0x6d, 0xb5, 0xe7, 0xc4, 0x89, 0x45, 0xd7, 0x85, 0xc5, 0x2a, 0x96, 0x52, - 0xf4, 0x1a, 0x2c, 0x98, 0xd4, 0xf3, 0xc8, 0x88, 0xb6, 0xeb, 0x42, 0x71, 0x59, 0x2a, 0x2e, 0x1c, - 0x04, 0xcb, 0x38, 0x94, 0x77, 0xff, 0xa8, 0x40, 0x2b, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd2, - 0xb5, 0xfa, 0xfd, 0x0a, 0xc9, 0x59, 0x52, 0xa3, 0xff, 0xa8, 0x01, 0x8a, 0x95, 0xb0, 0x6d, 0x18, - 0x03, 0xa2, 0x9d, 0xa1, 0x4d, 0x98, 0xb3, 0x88, 0x19, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x26, - 0xc5, 0x42, 0x82, 0xbe, 0x54, 0x00, 0xf9, 0xe2, 0x36, 0x87, 0xbb, 0x96, 0x65, 0x33, 0xc2, 0x0f, - 0x38, 0x0c, 0x68, 0xaf, 0x42, 0x40, 0xa1, 0x2f, 0xf5, 0x34, 0x87, 0x72, 0xdf, 0x62, 0xee, 0x24, - 0xbe, 0xd8, 0xbc, 0x02, 0x2e, 0x70, 0x8d, 0x7e, 0x0e, 0xe0, 0x4a, 0xcc, 0x13, 0x5b, 0x96, 0x6d, - 0x79, 0x0f, 0x08, 0xdd, 0xef, 0xd9, 0xd6, 0x43, 0x7d, 0x14, 0x37, 0x16, 0x1c, 0x41, 0xe0, 0x04, - 0xdc, 0xfa, 0x7d, 0x58, 0x2b, 0x89, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0x8e, 0x0a, 0xf3, - 0x9f, 0x68, 0x15, 0xea, 0x63, 0x62, 0xf8, 0x34, 0xa8, 0x49, 0x1c, 0x7c, 0xdc, 0xa9, 0xbd, 0xa7, - 0x74, 0x7f, 0x5f, 0x4f, 0x66, 0x0a, 0xef, 0x37, 0x68, 0x8b, 0x3f, 0x0f, 0x8e, 0xa1, 0x6b, 0xc4, - 0x13, 0x18, 0xf5, 0xfe, 0x0b, 0xc1, 0xd3, 0x10, 0xac, 0xe1, 0x48, 0x8a, 0x7e, 0x09, 0x0d, 0x8f, - 0x1a, 0x54, 0x63, 0xb6, 0x2b, 0x5b, 0xdc, 0xdb, 0x15, 0x73, 0x8a, 0x0c, 0xa8, 0x71, 0x2c, 0x4d, - 0x03, 0xf8, 0xf0, 0x0b, 0x47, 0x90, 0xe8, 0x13, 0x68, 0x30, 0x6a, 0x3a, 0x06, 0x61, 0x54, 0x9e, - 0x5e, 0x2a, 0xaf, 0x78, 0xef, 0xe0, 0x60, 0x47, 0xf6, 0xf0, 0x44, 0xaa, 0x89, 0xee, 0x19, 0xe5, - 0x69, 0xb8, 0x8a, 0x23, 0x18, 0xf4, 0x33, 0x68, 0x78, 0x8c, 0xbf, 0xea, 0xa3, 0x89, 0xa8, 0xb6, - 0x8b, 0x9e, 0x95, 0x64, 0x1f, 0x0d, 0x4c, 0x62, 0xe8, 0x70, 0x05, 0x47, 0x70, 0x68, 0x17, 0x96, - 0x4d, 0xdd, 0xc2, 0x94, 0x0c, 0x27, 0xc7, 0x54, 0xb3, 0xad, 0xa1, 0x27, 0xca, 0xb4, 0xde, 0x5f, - 0x93, 0x46, 0xcb, 0x07, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x7d, 0x58, 0x0d, 0x9f, 0xdd, 0x8f, 0x74, - 0x8f, 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x7a, 0x5e, 0xbd, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, - 0xe2, 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x7d, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x87, 0x35, 0xe2, - 0xbe, 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x9f, 0xa6, 0xd2, 0xb4, 0x71, 0xb5, 0x34, 0x6d, 0x95, - 0xa7, 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, - 0xa2, 0xe1, 0xc9, 0x34, 0xc5, 0x8e, 0x5e, 0x9a, 0x9e, 0x6f, 0xac, 0x1d, 0x15, 0xab, 0xe0, 0x32, - 0xdb, 0xee, 0x5f, 0xe6, 0xe0, 0x46, 0xf6, 0x8d, 0x43, 0x1f, 0x03, 0xb2, 0x07, 0x1e, 0x75, 0xc7, - 0x74, 0xf8, 0x61, 0x30, 0xb8, 0xf1, 0xe9, 0x46, 0x11, 0xd3, 0x4d, 0x54, 
0xb7, 0x87, 0x39, 0x0d, - 0x5c, 0x60, 0x15, 0xcc, 0x47, 0xb2, 0x00, 0x6a, 0x22, 0xd0, 0xc4, 0x7c, 0x94, 0x2b, 0x82, 0x5d, - 0x58, 0x96, 0xb5, 0x1f, 0x0a, 0x45, 0xb2, 0x26, 0xee, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, - 0x17, 0x96, 0x5c, 0x9e, 0x07, 0x11, 0xc0, 0x82, 0x00, 0xf8, 0x8e, 0x04, 0x58, 0xc2, 0x49, 0x21, - 0x4e, 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x73, 0x02, - 0xe0, 0x45, 0x09, 0x70, 0x73, 0x37, 0xab, 0x80, 0xf3, 0x36, 0xe8, 0x00, 0x56, 0x7c, 0x2b, 0x0f, - 0x15, 0x24, 0xf1, 0x4b, 0x12, 0x6a, 0xe5, 0x34, 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0xe7, 0x00, 0x5a, - 0xf8, 0xaa, 0x7b, 0xed, 0x79, 0xd1, 0x86, 0xdf, 0xac, 0x50, 0x6c, 0xd1, 0x28, 0x10, 0xb7, 0xc0, - 0x68, 0xc9, 0xc3, 0x09, 0x4c, 0x74, 0x07, 0x5a, 0x9a, 0x6d, 0x18, 0x22, 0xf3, 0xf7, 0x6c, 0xdf, - 0x62, 0x22, 0x79, 0xeb, 0x7d, 0xc4, 0x1f, 0xfb, 0xbd, 0x94, 0x04, 0x67, 0x34, 0xbb, 0x7f, 0x56, - 0x92, 0xcf, 0x4c, 0x58, 0xce, 0xe8, 0x4e, 0x6a, 0xf4, 0x79, 0x25, 0x33, 0xfa, 0xdc, 0xca, 0x5b, - 0x24, 0x26, 0x1f, 0x1d, 0x96, 0x78, 0xf2, 0xeb, 0xd6, 0x28, 0xb8, 0x70, 0xd9, 0x12, 0xdf, 0xba, - 0xb0, 0x94, 0x22, 0xed, 0xc4, 0xc3, 0x78, 0x53, 0xdc, 0x79, 0x52, 0x88, 0xd3, 0xc8, 0xdd, 0x0f, - 0xa0, 0x95, 0xae, 0xc3, 0xd4, 0x4c, 0xaf, 0x5c, 0x3a, 0xd3, 0x7f, 0xab, 0xc0, 0x5a, 0x89, 0x77, - 0x64, 0x40, 0xcb, 0x24, 0x8f, 0x13, 0xd7, 0x7c, 0xe9, 0x6c, 0xcc, 0x59, 0x93, 0x1a, 0xb0, 0x26, - 0xf5, 0x81, 0xc5, 0x0e, 0xdd, 0x63, 0xe6, 0xea, 0xd6, 0x28, 0xb8, 0x87, 0x83, 0x14, 0x16, 0xce, - 0x60, 0xa3, 0xcf, 0xa0, 0x61, 0x92, 0xc7, 0xc7, 0xbe, 0x3b, 0x2a, 0x3a, 0xaf, 0x6a, 0x7e, 0xc4, - 0xfb, 0x71, 0x20, 0x51, 0x70, 0x84, 0xd7, 0x3d, 0x84, 0xcd, 0xd4, 0x26, 0x79, 0xab, 0xa0, 0x0f, - 0x7d, 0xe3, 0x98, 0xc6, 0x17, 0xfe, 0x06, 0x34, 0x1d, 0xe2, 0x32, 0x3d, 0x6a, 0x17, 0xf5, 0xfe, - 0xd2, 0xf4, 0x7c, 0xa3, 0x79, 0x14, 0x2e, 0xe2, 0x58, 0xde, 0xfd, 0xb7, 0x02, 0xf5, 0x63, 0x8d, - 0x18, 0xf4, 0x1a, 0xa8, 0xc3, 0xbd, 0x14, 0x75, 0xe8, 0x96, 0x26, 0x91, 0x88, 0xa7, 0x94, 0x35, - 0xec, 0x67, 0x58, 0xc3, 0xcb, 0x97, 0xe0, 0x5c, 0x4c, 0x18, 0xde, 0x87, 0x66, 0xe4, 0x2e, 0xd5, - 0x25, 0x95, 0xcb, 0xba, 0x64, 0xf7, 0x77, 0x35, 0x58, 0x4c, 0xb8, 0xb8, 0x9a, 0x35, 0x3f, 0xee, - 0xc4, 0xa0, 0xc1, 0x3b, 0xc9, 0x4e, 0x95, 0x8d, 0xa8, 0xe1, 0x50, 0x11, 0xcc, 0x6f, 0xf1, 0xeb, - 0x9d, 0x9f, 0x35, 0x3e, 0x80, 0x16, 0x23, 0xee, 0x88, 0xb2, 0x50, 0x26, 0x0e, 0xac, 0x19, 0x93, - 0x87, 0x93, 0x94, 0x14, 0x67, 0xb4, 0xd7, 0xef, 0xc2, 0x52, 0xca, 0xd9, 0x95, 0x86, 0xb0, 0x2f, - 0xf9, 0xe1, 0xc4, 0xc9, 0x79, 0x0d, 0xd9, 0xf5, 0x71, 0x2a, 0xbb, 0xb6, 0xca, 0x0f, 0x33, 0x51, - 0x32, 0x65, 0x39, 0x86, 0x33, 0x39, 0xf6, 0x7a, 0x25, 0xb4, 0x8b, 0x33, 0xed, 0x9f, 0x35, 0x58, - 0x4d, 0x68, 0xc7, 0xdc, 0xf4, 0x87, 0xa9, 0x06, 0xbd, 0x95, 0x69, 0xd0, 0xed, 0x22, 0x9b, 0xe7, - 0x46, 0x4e, 0x8b, 0x09, 0xe3, 0xec, 0xff, 0x22, 0x61, 0xfc, 0x93, 0x02, 0xcb, 0x89, 0xb3, 0xbb, - 0x06, 0xc6, 0xf8, 0x20, 0xcd, 0x18, 0x5f, 0xae, 0x92, 0x34, 0x25, 0x94, 0xf1, 0xab, 0xf9, 0x54, - 0xf0, 0xff, 0xf7, 0x24, 0xe6, 0xd7, 0xb0, 0x3a, 0xb6, 0x0d, 0xdf, 0xa4, 0x7b, 0x06, 0xd1, 0xcd, - 0x50, 0x81, 0x0f, 0x7d, 0xb3, 0xd9, 0x3f, 0x86, 0x22, 0x78, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, - 0x3e, 0x8d, 0x2d, 0xfb, 0xdf, 0x95, 0x4e, 0x56, 0x3f, 0x2d, 0x80, 0xc3, 0x85, 0x4e, 0xd0, 0x0f, - 0x60, 0x91, 0x0f, 0xcc, 0xba, 0x46, 0x39, 0xf7, 0x96, 0x89, 0xb5, 0x22, 0x81, 0x16, 0x8f, 0x63, - 0x11, 0x4e, 0xea, 0xa1, 0x47, 0xb0, 0xe2, 0xd8, 0xc3, 0x03, 0x62, 0x91, 0x11, 0xe5, 0x63, 0xc6, - 0x91, 0x6d, 0xe8, 0xda, 0x44, 0x30, 0x9b, 0x66, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, - 
0xe3, 0x14, 0x21, 0xbf, 0x2c, 0x8a, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0xe5, 0x73, 0x2f, 0x89, - 0x5e, 0xf0, 0x17, 0xce, 0x4e, 0x95, 0x0c, 0x3b, 0x4d, 0x59, 0xc6, 0xdd, 0x3f, 0xbd, 0x8e, 0x33, - 0x1e, 0x4a, 0x89, 0x5b, 0xe3, 0xbf, 0x22, 0x6e, 0x05, 0x4c, 0xb2, 0x79, 0x35, 0x26, 0xd9, 0xfd, - 0x43, 0x1d, 0x6e, 0xe6, 0xba, 0x2d, 0xfa, 0xf1, 0x05, 0x24, 0xe9, 0xd6, 0x73, 0x23, 0x48, 0x39, - 0x76, 0x33, 0x7b, 0x05, 0x76, 0xb3, 0x0b, 0xcb, 0x9a, 0xef, 0xba, 0xd4, 0x62, 0x19, 0x6e, 0x13, - 0x9d, 0xc5, 0x5e, 0x5a, 0x8c, 0xb3, 0xfa, 0x45, 0x04, 0xad, 0x7e, 0x45, 0x82, 0x96, 0x8c, 0x42, - 0x0e, 0xd9, 0x41, 0xe6, 0xe6, 0xa3, 0x90, 0xb3, 0x76, 0x56, 0x9f, 0x0f, 0x18, 0x01, 0x6a, 0x84, - 0xb0, 0x90, 0x1e, 0x30, 0x4e, 0x53, 0x52, 0x9c, 0xd1, 0x2e, 0x20, 0x3b, 0xcd, 0xaa, 0x64, 0x07, - 0x91, 0x14, 0x15, 0x03, 0xd1, 0x26, 0xb6, 0xab, 0x94, 0x43, 0x75, 0x2e, 0x56, 0xc8, 0x42, 0x17, - 0xaf, 0xce, 0x42, 0xbb, 0x7f, 0x55, 0xe0, 0xc5, 0xd2, 0x82, 0x44, 0xbb, 0xa9, 0xe7, 0x7f, 0x3b, - 0xf3, 0xfc, 0x7f, 0xaf, 0xd4, 0x30, 0x31, 0x03, 0xb8, 0xc5, 0x34, 0xed, 0xfd, 0x6a, 0x34, 0xad, - 0x80, 0x43, 0x5c, 0xce, 0xd7, 0xfa, 0xdb, 0x4f, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, - 0xe6, 0x69, 0x67, 0xe6, 0x37, 0xd3, 0x8e, 0xf2, 0x64, 0xda, 0x51, 0xbe, 0x9e, 0x76, 0x94, 0x6f, - 0xa6, 0x1d, 0xe5, 0xef, 0xd3, 0x8e, 0xf2, 0xd5, 0xb7, 0x9d, 0x99, 0xcf, 0x16, 0xa4, 0xc7, 0xff, - 0x04, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x02, 0xfa, 0x27, 0xed, 0x1b, 0x00, 0x00, + // 1968 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x4a, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x49, 0x81, 0x35, + 0x12, 0xe5, 0x43, 0xcb, 0x58, 0x49, 0x83, 0xc4, 0x2e, 0xdc, 0x8a, 0x92, 0x1b, 0x3b, 0x90, 0x22, + 0x65, 0x24, 0xc5, 0x68, 0xfa, 0x81, 0x0c, 0xc9, 0x31, 0xb5, 0xd1, 0x7e, 0x61, 0x77, 0xc8, 0x98, + 0xe8, 0xa5, 0x7f, 0x40, 0x81, 0xf4, 0xdc, 0xbf, 0xa2, 0xb7, 0x16, 0xed, 0xad, 0x87, 0xc2, 0xc7, + 0xa0, 0x97, 0xa6, 0x17, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, + 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x9b, 0x99, 0xf7, 0x31, + 0xef, 0x47, 0xf8, 0xe1, 0xf9, 0xbb, 0xae, 0xaa, 0x59, 0xcd, 0xf3, 0x7e, 0x9b, 0x3a, 0x26, 0x65, + 0xd4, 0x6d, 0x0e, 0xa8, 0xd9, 0xb5, 0x9c, 0xa6, 0x14, 0x10, 0x5b, 0x6b, 0x12, 0xdb, 0x76, 0x9b, + 0x83, 0x5b, 0x6d, 0xca, 0xc8, 0xad, 0x66, 0x8f, 0x9a, 0xd4, 0x21, 0x8c, 0x76, 0x55, 0xdb, 0xb1, + 0x98, 0x85, 0xd6, 0x3c, 0x45, 0x95, 0xd8, 0x9a, 0xca, 0x15, 0x55, 0xa9, 0xb8, 0xbe, 0xd5, 0xd3, + 0xd8, 0x59, 0xbf, 0xad, 0x76, 0x2c, 0xa3, 0xd9, 0xb3, 0x7a, 0x56, 0x53, 0xe8, 0xb7, 0xfb, 0x8f, + 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x3c, 0x9c, 0xf5, 0x46, 0xc4, 0x61, 0xc7, 0x72, 0x68, 0x73, 0x90, + 0xf2, 0xb5, 0xfe, 0x76, 0xa8, 0x63, 0x90, 0xce, 0x99, 0x66, 0x52, 0x67, 0xd8, 0xb4, 0xcf, 0x7b, + 0x7c, 0xc1, 0x6d, 0x1a, 0x94, 0x91, 0x2c, 0xab, 0x66, 0x9e, 0x95, 0xd3, 0x37, 0x99, 0x66, 0xd0, + 0x94, 0xc1, 0x3b, 0x97, 0x19, 0xb8, 0x9d, 0x33, 0x6a, 0x90, 0x94, 0xdd, 0x5b, 0x79, 0x76, 0x7d, + 0xa6, 0xe9, 0x4d, 0xcd, 0x64, 0x2e, 0x73, 0x92, 0x46, 0x8d, 0x7f, 0x2b, 0x80, 0x76, 0x2d, 0x93, + 0x39, 0x96, 0xae, 0x53, 0x07, 0xd3, 0x81, 0xe6, 0x6a, 0x96, 0x89, 0x3e, 0x85, 0x32, 0x3f, 0x4f, + 0x97, 0x30, 0x52, 0x55, 0x36, 0x94, 0xcd, 0xc5, 0xed, 0x37, 0xd5, 0xf0, 0xa6, 0x03, 0x78, 0xd5, + 0x3e, 0xef, 0xf1, 0x05, 0x57, 0xe5, 0xda, 0xea, 0xe0, 0x96, 0x7a, 0xd8, 0xfe, 0x8c, 0x76, 0xd8, + 0x01, 0x65, 0xa4, 0x85, 0x9e, 0x8c, 0xea, 0x53, 0xe3, 0x51, 0x1d, 0xc2, 0x35, 0x1c, 0xa0, 
0xa2, + 0x43, 0x98, 0x15, 0xe8, 0xd3, 0x02, 0x7d, 0x2b, 0x17, 0x5d, 0x1e, 0x5a, 0xc5, 0xe4, 0xf3, 0x7b, + 0x8f, 0x19, 0x35, 0xf9, 0xf6, 0x5a, 0x2f, 0x48, 0xe8, 0xd9, 0x3d, 0xc2, 0x08, 0x16, 0x40, 0xe8, + 0x0d, 0x28, 0x3b, 0x72, 0xfb, 0xd5, 0x99, 0x0d, 0x65, 0x73, 0xa6, 0x75, 0x4d, 0x6a, 0x95, 0xfd, + 0x63, 0xe1, 0x40, 0xa3, 0xf1, 0x44, 0x81, 0x1b, 0xe9, 0x73, 0xef, 0x6b, 0x2e, 0x43, 0x3f, 0x4d, + 0x9d, 0x5d, 0x2d, 0x76, 0x76, 0x6e, 0x2d, 0x4e, 0x1e, 0x38, 0xf6, 0x57, 0x22, 0xe7, 0x3e, 0x82, + 0x92, 0xc6, 0xa8, 0xe1, 0x56, 0xa7, 0x37, 0x66, 0x36, 0x17, 0xb7, 0x5f, 0x57, 0x73, 0x12, 0x58, + 0x4d, 0xef, 0xae, 0xb5, 0x24, 0x71, 0x4b, 0x0f, 0x38, 0x02, 0xf6, 0x80, 0x1a, 0xbf, 0x9a, 0x06, + 0xd8, 0xa3, 0xb6, 0x6e, 0x0d, 0x0d, 0x6a, 0xb2, 0x2b, 0x08, 0xdd, 0x03, 0x98, 0x75, 0x6d, 0xda, + 0x91, 0xa1, 0x7b, 0x25, 0xf7, 0x04, 0xe1, 0xa6, 0x8e, 0x6d, 0xda, 0x09, 0x83, 0xc6, 0xbf, 0xb0, + 0x80, 0x40, 0x1f, 0xc1, 0x9c, 0xcb, 0x08, 0xeb, 0xbb, 0x22, 0x64, 0x8b, 0xdb, 0xaf, 0x16, 0x01, + 0x13, 0x06, 0xad, 0x8a, 0x84, 0x9b, 0xf3, 0xbe, 0xb1, 0x04, 0x6a, 0xfc, 0x75, 0x06, 0x56, 0x42, + 0xe5, 0x5d, 0xcb, 0xec, 0x6a, 0x8c, 0xa7, 0xf4, 0x1d, 0x98, 0x65, 0x43, 0x9b, 0x8a, 0x3b, 0x59, + 0x68, 0xbd, 0xe2, 0x6f, 0xe6, 0x64, 0x68, 0xd3, 0x67, 0xa3, 0xfa, 0x5a, 0x86, 0x09, 0x17, 0x61, + 0x61, 0x84, 0xf6, 0x83, 0x7d, 0x4e, 0x0b, 0xf3, 0xb7, 0xe3, 0xce, 0x9f, 0x8d, 0xea, 0x19, 0x0d, + 0x44, 0x0d, 0x90, 0xe2, 0x5b, 0x44, 0x9f, 0x41, 0x45, 0x27, 0x2e, 0x3b, 0xb5, 0xbb, 0x84, 0xd1, + 0x13, 0xcd, 0xa0, 0xd5, 0x39, 0x71, 0xfa, 0xd7, 0x8a, 0x05, 0x8a, 0x5b, 0xb4, 0x6e, 0xc8, 0x1d, + 0x54, 0xf6, 0x63, 0x48, 0x38, 0x81, 0x8c, 0x06, 0x80, 0xf8, 0xca, 0x89, 0x43, 0x4c, 0xd7, 0x3b, + 0x15, 0xf7, 0x37, 0x3f, 0xb1, 0xbf, 0x75, 0xe9, 0x0f, 0xed, 0xa7, 0xd0, 0x70, 0x86, 0x07, 0xf4, + 0x32, 0xcc, 0x39, 0x94, 0xb8, 0x96, 0x59, 0x9d, 0x15, 0x37, 0x16, 0x84, 0x0b, 0x8b, 0x55, 0x2c, + 0xa5, 0xe8, 0x55, 0x98, 0x37, 0xa8, 0xeb, 0x92, 0x1e, 0xad, 0x96, 0x84, 0xe2, 0xb2, 0x54, 0x9c, + 0x3f, 0xf0, 0x96, 0xb1, 0x2f, 0x6f, 0xfc, 0x5e, 0x81, 0x4a, 0x18, 0xa6, 0x2b, 0xa8, 0xd5, 0xfb, + 0xf1, 0x5a, 0xfd, 0x6e, 0x81, 0xe4, 0xcc, 0xa9, 0xd1, 0x7f, 0x4c, 0x03, 0x0a, 0x95, 0xb0, 0xa5, + 0xeb, 0x6d, 0xd2, 0x39, 0x47, 0x1b, 0x30, 0x6b, 0x12, 0xc3, 0xcf, 0xc9, 0xa0, 0x40, 0x3e, 0x24, + 0x06, 0xc5, 0x42, 0x82, 0xbe, 0x50, 0x00, 0xf5, 0x45, 0x34, 0xbb, 0x3b, 0xa6, 0x69, 0x31, 0xc2, + 0x2f, 0xd8, 0xdf, 0xd0, 0x6e, 0x81, 0x0d, 0xf9, 0xbe, 0xd4, 0xd3, 0x14, 0xca, 0x3d, 0x93, 0x39, + 0xc3, 0x30, 0xb0, 0x69, 0x05, 0x9c, 0xe1, 0x1a, 0xfd, 0x04, 0xc0, 0x91, 0x98, 0x27, 0x96, 0x2c, + 0xdb, 0xfc, 0x1e, 0xe0, 0xbb, 0xdf, 0xb5, 0xcc, 0x47, 0x5a, 0x2f, 0x6c, 0x2c, 0x38, 0x80, 0xc0, + 0x11, 0xb8, 0xf5, 0x7b, 0xb0, 0x96, 0xb3, 0x4f, 0x74, 0x0d, 0x66, 0xce, 0xe9, 0xd0, 0xbb, 0x2a, + 0xcc, 0x7f, 0xa2, 0x55, 0x28, 0x0d, 0x88, 0xde, 0xa7, 0x5e, 0x4d, 0x62, 0xef, 0xe3, 0xf6, 0xf4, + 0xbb, 0x4a, 0xe3, 0xb7, 0xa5, 0x68, 0xa6, 0xf0, 0x7e, 0x83, 0x36, 0xf9, 0xf3, 0x60, 0xeb, 0x5a, + 0x87, 0xb8, 0x02, 0xa3, 0xd4, 0x7a, 0xc1, 0x7b, 0x1a, 0xbc, 0x35, 0x1c, 0x48, 0xd1, 0xcf, 0xa0, + 0xec, 0x52, 0x9d, 0x76, 0x98, 0xe5, 0xc8, 0x16, 0xf7, 0x56, 0xc1, 0x9c, 0x22, 0x6d, 0xaa, 0x1f, + 0x4b, 0x53, 0x0f, 0xde, 0xff, 0xc2, 0x01, 0x24, 0xfa, 0x08, 0xca, 0x8c, 0x1a, 0xb6, 0x4e, 0x18, + 0x95, 0xb7, 0x17, 0xcb, 0x2b, 0xde, 0x3b, 0x38, 0xd8, 0x91, 0xd5, 0x3d, 0x91, 0x6a, 0xa2, 0x7b, + 0x06, 0x79, 0xea, 0xaf, 0xe2, 0x00, 0x06, 0xfd, 0x18, 0xca, 0x2e, 0xe3, 0xaf, 0x7a, 0x6f, 0x28, + 0xaa, 0xed, 0xa2, 0x67, 0x25, 0xda, 0x47, 0x3d, 0x93, 0x10, 0xda, 0x5f, 0xc1, 0x01, 0x1c, 0xda, + 0x81, 0x65, 0x43, 
0x33, 0x31, 0x25, 0xdd, 0xe1, 0x31, 0xed, 0x58, 0x66, 0xd7, 0x15, 0x65, 0x5a, + 0x6a, 0xad, 0x49, 0xa3, 0xe5, 0x83, 0xb8, 0x18, 0x27, 0xf5, 0xd1, 0x3e, 0xac, 0xfa, 0xcf, 0xee, + 0x7d, 0xcd, 0x65, 0x96, 0x33, 0xdc, 0xd7, 0x0c, 0x8d, 0x89, 0x9e, 0x57, 0x6a, 0x55, 0xc7, 0xa3, + 0xfa, 0x2a, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xde, 0x57, 0x6c, 0xd2, 0x77, 0x69, 0x57, 0xf4, 0xb0, + 0x72, 0xd8, 0x57, 0x8e, 0xc4, 0x2a, 0x96, 0x52, 0xf4, 0x30, 0x96, 0xa6, 0xe5, 0xc9, 0xd2, 0xb4, + 0x92, 0x9f, 0xa2, 0xe8, 0x14, 0xd6, 0x6c, 0xc7, 0xea, 0x39, 0xd4, 0x75, 0xf7, 0x28, 0xe9, 0xea, + 0x9a, 0x49, 0xfd, 0x9b, 0x59, 0x10, 0x27, 0x7a, 0x69, 0x3c, 0xaa, 0xaf, 0x1d, 0x65, 0xab, 0xe0, + 0x3c, 0xdb, 0xc6, 0x9f, 0x66, 0xe1, 0x5a, 0xf2, 0x8d, 0x43, 0x1f, 0x00, 0xb2, 0xda, 0x2e, 0x75, + 0x06, 0xb4, 0xfb, 0xbe, 0x37, 0xb8, 0xf1, 0xe9, 0x46, 0x11, 0xd3, 0x4d, 0x50, 0xb7, 0x87, 0x29, + 0x0d, 0x9c, 0x61, 0xe5, 0xcd, 0x47, 0xb2, 0x00, 0xa6, 0xc5, 0x46, 0x23, 0xf3, 0x51, 0xaa, 0x08, + 0x76, 0x60, 0x59, 0xd6, 0xbe, 0x2f, 0x14, 0xc9, 0x1a, 0x89, 0xfb, 0x69, 0x5c, 0x8c, 0x93, 0xfa, + 0xe8, 0x0e, 0x2c, 0x39, 0x3c, 0x0f, 0x02, 0x80, 0x79, 0x01, 0xf0, 0x2d, 0x09, 0xb0, 0x84, 0xa3, + 0x42, 0x1c, 0xd7, 0x45, 0xef, 0xc3, 0x75, 0x32, 0x20, 0x9a, 0x4e, 0xda, 0x3a, 0x0d, 0x00, 0x66, + 0x05, 0xc0, 0x8b, 0x12, 0xe0, 0xfa, 0x4e, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x01, 0xac, 0xf4, 0xcd, + 0x34, 0x94, 0x97, 0xc4, 0x2f, 0x49, 0xa8, 0x95, 0xd3, 0xb4, 0x0a, 0xce, 0xb2, 0x43, 0x9f, 0x02, + 0x74, 0xfc, 0x57, 0xdd, 0xad, 0xce, 0x89, 0x36, 0xfc, 0x46, 0x81, 0x62, 0x0b, 0x46, 0x81, 0xb0, + 0x05, 0x06, 0x4b, 0x2e, 0x8e, 0x60, 0xa2, 0xdb, 0x50, 0xe9, 0x58, 0xba, 0x2e, 0x32, 0x7f, 0xd7, + 0xea, 0x9b, 0x4c, 0x24, 0x6f, 0xa9, 0x85, 0xf8, 0x63, 0xbf, 0x1b, 0x93, 0xe0, 0x84, 0x66, 0xe3, + 0x8f, 0x4a, 0xf4, 0x99, 0xf1, 0xcb, 0x19, 0xdd, 0x8e, 0x8d, 0x3e, 0x2f, 0x27, 0x46, 0x9f, 0x1b, + 0x69, 0x8b, 0xc8, 0xe4, 0xa3, 0xc1, 0x12, 0x4f, 0x7e, 0xcd, 0xec, 0x79, 0x01, 0x97, 0x2d, 0xf1, + 0xcd, 0x0b, 0x4b, 0x29, 0xd0, 0x8e, 0x3c, 0x8c, 0xd7, 0x45, 0xcc, 0xa3, 0x42, 0x1c, 0x47, 0x6e, + 0xdc, 0x85, 0x4a, 0xbc, 0x0e, 0x63, 0x33, 0xbd, 0x72, 0xe9, 0x4c, 0xff, 0xb5, 0x02, 0x6b, 0x39, + 0xde, 0x91, 0x0e, 0x15, 0x83, 0x3c, 0x8e, 0x84, 0xf9, 0xd2, 0xd9, 0x98, 0xb3, 0x26, 0xd5, 0x63, + 0x4d, 0xea, 0x03, 0x93, 0x1d, 0x3a, 0xc7, 0xcc, 0xd1, 0xcc, 0x9e, 0x17, 0x87, 0x83, 0x18, 0x16, + 0x4e, 0x60, 0xa3, 0x4f, 0xa0, 0x6c, 0x90, 0xc7, 0xc7, 0x7d, 0xa7, 0x97, 0x75, 0x5f, 0xc5, 0xfc, + 0x88, 0xf7, 0xe3, 0x40, 0xa2, 0xe0, 0x00, 0xaf, 0x71, 0x08, 0x1b, 0xb1, 0x43, 0xf2, 0x56, 0x41, + 0x1f, 0xf5, 0xf5, 0x63, 0x1a, 0x06, 0xfc, 0x75, 0x58, 0xb0, 0x89, 0xc3, 0xb4, 0xa0, 0x5d, 0x94, + 0x5a, 0x4b, 0xe3, 0x51, 0x7d, 0xe1, 0xc8, 0x5f, 0xc4, 0xa1, 0xbc, 0xf1, 0x1f, 0x05, 0x4a, 0xc7, + 0x1d, 0xa2, 0xd3, 0x2b, 0xa0, 0x0e, 0x7b, 0x31, 0xea, 0xd0, 0xc8, 0x4d, 0x22, 0xb1, 0x9f, 0x5c, + 0xd6, 0xb0, 0x9f, 0x60, 0x0d, 0x37, 0x2f, 0xc1, 0xb9, 0x98, 0x30, 0xbc, 0x07, 0x0b, 0x81, 0xbb, + 0x58, 0x97, 0x54, 0x2e, 0xeb, 0x92, 0x8d, 0xdf, 0x4c, 0xc3, 0x62, 0xc4, 0xc5, 0x64, 0xd6, 0xfc, + 0xba, 0x23, 0x83, 0x06, 0xef, 0x24, 0xdb, 0x45, 0x0e, 0xa2, 0xfa, 0x43, 0x85, 0x37, 0xbf, 0x85, + 0xaf, 0x77, 0x7a, 0xd6, 0xb8, 0x0b, 0x15, 0x46, 0x9c, 0x1e, 0x65, 0xbe, 0x4c, 0x5c, 0xd8, 0x42, + 0x48, 0x1e, 0x4e, 0x62, 0x52, 0x9c, 0xd0, 0x5e, 0xbf, 0x03, 0x4b, 0x31, 0x67, 0x13, 0x0d, 0x61, + 0x5f, 0xf0, 0xcb, 0x09, 0x93, 0xf3, 0x0a, 0xb2, 0xeb, 0x83, 0x58, 0x76, 0x6d, 0xe6, 0x5f, 0x66, + 0xa4, 0x64, 0xf2, 0x72, 0x0c, 0x27, 0x72, 0xec, 0xb5, 0x42, 0x68, 0x17, 0x67, 0xda, 0x3f, 0xa7, + 0x61, 0x35, 0xa2, 0x1d, 0x72, 0xd3, 0xef, 
0xc7, 0x1a, 0xf4, 0x66, 0xa2, 0x41, 0x57, 0xb3, 0x6c, + 0x9e, 0x1b, 0x39, 0xcd, 0x26, 0x8c, 0x33, 0xff, 0x8f, 0x84, 0xf1, 0x0f, 0x0a, 0x2c, 0x47, 0xee, + 0xee, 0x0a, 0x18, 0xe3, 0x83, 0x38, 0x63, 0xbc, 0x59, 0x24, 0x69, 0x72, 0x28, 0xe3, 0xbf, 0x14, + 0x68, 0x46, 0xb4, 0x8e, 0xa8, 0xe3, 0x6a, 0x2e, 0xa3, 0x26, 0xfb, 0xd8, 0xd2, 0xfb, 0x06, 0xdd, + 0xd5, 0x89, 0x66, 0x60, 0xca, 0x17, 0x34, 0xcb, 0x3c, 0xb2, 0x74, 0xad, 0x33, 0x44, 0x04, 0x16, + 0x3f, 0x3f, 0xa3, 0xe6, 0x1e, 0xd5, 0x29, 0xa3, 0x5d, 0x99, 0x4e, 0x3f, 0x90, 0xf0, 0x8b, 0x0f, + 0x43, 0xd1, 0xb3, 0x51, 0x7d, 0xb3, 0x08, 0xa2, 0xc8, 0xb2, 0x28, 0x26, 0xfa, 0x39, 0x00, 0xff, + 0x14, 0xfd, 0xa8, 0x2b, 0x13, 0xee, 0xae, 0x5f, 0x95, 0x0f, 0x03, 0xc9, 0x44, 0x0e, 0x22, 0x88, + 0x8d, 0xbf, 0xcd, 0xc7, 0x62, 0xf6, 0x8d, 0xe7, 0x6e, 0xbf, 0x80, 0xd5, 0x41, 0x78, 0x3b, 0xbe, + 0x02, 0x9f, 0x75, 0x67, 0x92, 0xff, 0x87, 0x05, 0xf0, 0x59, 0xf7, 0xda, 0xfa, 0xb6, 0x74, 0xb2, + 0xfa, 0x71, 0x06, 0x1c, 0xce, 0x74, 0x82, 0xbe, 0x07, 0x8b, 0x9c, 0x27, 0x68, 0x1d, 0xfa, 0x21, + 0x31, 0xfc, 0x7a, 0x5a, 0xf1, 0xf3, 0xe5, 0x38, 0x14, 0xe1, 0xa8, 0x1e, 0x3a, 0x83, 0x15, 0xdb, + 0xea, 0x1e, 0x10, 0x93, 0xf4, 0x28, 0x9f, 0xae, 0xbc, 0x50, 0x0a, 0x42, 0xb7, 0xd0, 0x7a, 0xc7, + 0x9f, 0xa9, 0x8f, 0xd2, 0x2a, 0xcf, 0x38, 0x33, 0x4a, 0x2f, 0x8b, 0x24, 0xc8, 0x82, 0x44, 0x0e, + 0x54, 0xfa, 0x72, 0xca, 0x91, 0xfc, 0xd6, 0xfb, 0xe7, 0x6a, 0xbb, 0x48, 0x61, 0x9d, 0xc6, 0x2c, + 0xc3, 0x47, 0x2f, 0xbe, 0x8e, 0x13, 0x1e, 0x72, 0xf9, 0x6a, 0xf9, 0x7f, 0xe2, 0xab, 0x19, 0x04, + 0x7a, 0x61, 0x42, 0x02, 0xfd, 0x67, 0x05, 0x6e, 0xda, 0x05, 0x6a, 0xa9, 0x0a, 0xe2, 0x6e, 0xee, + 0x17, 0xb9, 0x9b, 0x22, 0xb5, 0xd9, 0xda, 0x1c, 0x8f, 0xea, 0x37, 0x8b, 0x68, 0xe2, 0x42, 0xfb, + 0x6b, 0xfc, 0xae, 0x04, 0xd7, 0x53, 0xaf, 0x25, 0xfa, 0xd1, 0x05, 0x24, 0xf7, 0xc6, 0x73, 0x23, + 0xb8, 0x29, 0x76, 0x3a, 0x33, 0x01, 0x3b, 0xdd, 0x81, 0xe5, 0x4e, 0xdf, 0x71, 0xa8, 0xc9, 0x12, + 0xdc, 0x34, 0x08, 0xea, 0x6e, 0x5c, 0x8c, 0x93, 0xfa, 0x59, 0x04, 0xbb, 0x34, 0x21, 0xc1, 0x8e, + 0xee, 0x42, 0x92, 0x24, 0xaf, 0x04, 0xd3, 0xbb, 0x90, 0x5c, 0x29, 0xa9, 0xcf, 0x07, 0x44, 0x0f, + 0x35, 0x40, 0x98, 0x8f, 0x0f, 0x88, 0xa7, 0x31, 0x29, 0x4e, 0x68, 0x67, 0x90, 0xd5, 0x85, 0xa2, + 0x64, 0x15, 0x91, 0x18, 0x95, 0x06, 0xd1, 0xef, 0xb6, 0x8a, 0xe4, 0x6e, 0x71, 0x2e, 0x9d, 0xf9, + 0x2f, 0xc2, 0xe2, 0xe4, 0xff, 0x22, 0x34, 0xfe, 0xa2, 0xc0, 0x8b, 0xb9, 0x9d, 0x05, 0xed, 0xc4, + 0xc6, 0xb7, 0xad, 0xc4, 0xf8, 0xf6, 0x9d, 0x5c, 0xc3, 0xc8, 0x0c, 0xe7, 0x64, 0xd3, 0xec, 0xf7, + 0x8a, 0xd1, 0xec, 0x0c, 0x0e, 0x78, 0x39, 0xdf, 0x6e, 0x6d, 0x3d, 0x79, 0x5a, 0x9b, 0xfa, 0xf2, + 0x69, 0x6d, 0xea, 0xab, 0xa7, 0xb5, 0xa9, 0x5f, 0x8e, 0x6b, 0xca, 0x93, 0x71, 0x4d, 0xf9, 0x72, + 0x5c, 0x53, 0xbe, 0x1a, 0xd7, 0x94, 0xbf, 0x8f, 0x6b, 0xca, 0xaf, 0xbf, 0xae, 0x4d, 0x7d, 0x32, + 0x2f, 0x3d, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xfc, 0xe0, 0xc3, 0xad, 0x1d, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -1687,6 +1724,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1707,6 +1777,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) i-- dAtA[i] = 0x48 @@ -2213,6 +2295,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -2244,6 +2339,10 @@ func (m *StatefulSetSpec) Size() (n int) { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2559,6 +2658,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -2578,6 +2688,7 @@ func (this *StatefulSetSpec) String() string { `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -5436,6 +5547,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5724,6 +5949,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 128efa9ca..9f9df98fc 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -190,7 +190,7 @@ message DeploymentStatus { // +optional optional int32 
updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -367,6 +367,23 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. @@ -434,6 +451,12 @@ message StatefulSetSpec { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -446,7 +469,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -478,9 +501,7 @@ message StatefulSetStatus { repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index be638bb0f..832ef34f4 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -158,6 +158,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. 
+type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -225,6 +259,12 @@ type StatefulSetSpec struct { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -237,7 +277,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. 
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -269,10 +309,8 @@ type StatefulSetStatus struct { Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"` + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -485,7 +523,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 51e08b575..e92881a35 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -116,7 +116,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -227,17 +227,28 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. 
The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. 
This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -248,14 +259,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index fb2761241..be3fcc75b 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -499,6 +500,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -526,6 +543,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go index f3850fc90..1e2233e31 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index e03a95acd..cd1a06e25 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -833,10 +833,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { + *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} +} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} +func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src) +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo + func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} + return fileDescriptor_42fe616264472f7e, []int{29} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +894,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} + return fileDescriptor_42fe616264472f7e, []int{30} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +922,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return 
fileDescriptor_42fe616264472f7e, []int{30} + return fileDescriptor_42fe616264472f7e, []int{31} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,6 +977,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") @@ -957,144 +988,150 @@ func init() { } var fileDescriptor_42fe616264472f7e = []byte{ - // 2182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, - 0xf9, 0xf6, 0xec, 0x87, 0xb4, 0x4b, 0x59, 0x92, 0x4d, 0xe9, 0x27, 0x6d, 0xec, 0x5f, 0x57, 0xc6, - 0x26, 0x70, 0x94, 0xd8, 0x9a, 0xb5, 0x95, 0x0f, 0x24, 0x76, 0xd1, 0x56, 0x2b, 0xa5, 0xb6, 0x03, - 0x7d, 0x85, 0xb2, 0x0c, 0x34, 0x68, 0x51, 0x53, 0xbb, 0xf4, 0x6a, 0xa2, 0xf9, 0xc2, 0x0c, 0x67, - 0xeb, 0x45, 0x2f, 0xbd, 0x16, 0x28, 0xd0, 0xf4, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6e, 0x45, - 0x50, 0xf8, 0x18, 0xf4, 0x92, 0x9c, 0x84, 0x7a, 0x73, 0x2a, 0x8a, 0x1e, 0x7b, 0x09, 0x50, 0xa0, - 0x20, 0x87, 0xf3, 0xc1, 0xf9, 0xf0, 0x8e, 0x14, 0x47, 0x69, 0x82, 0xdc, 0xb4, 0xe4, 0xf3, 0x3e, - 0x7c, 0x5f, 0xf2, 0x25, 0xdf, 0x87, 0x1c, 0x81, 0x1f, 0x1d, 0xbd, 0xe5, 0xaa, 0x9a, 0xd5, 0x3e, - 0xf2, 0x0e, 0x88, 0x63, 0x12, 0x4a, 0xdc, 0xf6, 0x80, 0x98, 0x3d, 0xcb, 0x69, 0x8b, 0x0e, 0x6c, - 0x6b, 0x6d, 0x6c, 0xdb, 0x6e, 0x7b, 0x70, 0xf3, 0x80, 0x50, 0xbc, 0xda, 0xee, 0x13, 0x93, 0x38, - 0x98, 0x92, 0x9e, 0x6a, 0x3b, 0x16, 0xb5, 0xe0, 0xa2, 0x0f, 0x54, 0xb1, 0xad, 0xa9, 0x0c, 0xa8, - 0x0a, 0xe0, 0xa5, 0x95, 0xbe, 0x46, 0x0f, 0xbd, 0x03, 0xb5, 0x6b, 0x19, 0xed, 0xbe, 0xd5, 0xb7, - 0xda, 0x1c, 0x7f, 0xe0, 0x3d, 0xe2, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0x73, 0xa9, 0x15, 0x1b, - 0xb0, 0x6b, 0x39, 0xa4, 0x3d, 0xb8, 0x99, 0x1c, 0xeb, 0xd2, 0xeb, 0x11, 0xc6, 0xc0, 0xdd, 0x43, - 0xcd, 0x24, 0xce, 0xb0, 0x6d, 0x1f, 0xf5, 0x59, 0x83, 0xdb, 0x36, 0x08, 0xc5, 0x59, 0x56, 0xed, - 0x3c, 0x2b, 0xc7, 0x33, 0xa9, 0x66, 0x90, 0x94, 0xc1, 0x9b, 0xe3, 0x0c, 0xdc, 0xee, 0x21, 0x31, - 0x70, 0xca, 0xee, 0xb5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x5b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51, - 0xeb, 0xdf, 0x0a, 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0xc8, 0x40, 0x73, 0x35, - 0xcb, 0x84, 0x0f, 0x41, 0x8d, 0xc5, 0xd3, 0xc3, 0x14, 0x37, 0x94, 0x2b, 0xca, 0xf2, 0xd4, 0xea, - 0x0d, 0x35, 0x9a, 0xe9, 0x90, 0x5e, 0xb5, 0x8f, 0xfa, 0xac, 0xc1, 0x55, 0x19, 0x5a, 0x1d, 0xdc, - 0x54, 0x77, 0x0e, 0x3e, 0x20, 0x5d, 0xba, 0x45, 0x28, 0xee, 0xc0, 0x27, 0xc7, 0x4b, 0xe7, 0x46, - 0xc7, 0x4b, 0x20, 0x6a, 0x43, 0x21, 0x2b, 0xdc, 0x01, 0x15, 0xce, 0x5e, 0xe2, 0xec, 0x2b, 0xb9, - 0xec, 0x22, 0x68, 0x15, 0xe1, 0x5f, 0xbc, 0xf3, 0x98, 0x12, 0x93, 0xb9, 0xd7, 0x39, 0x2f, 0xa8, - 0x2b, 0x1b, 0x98, 0x62, 0xc4, 0x89, 0xe0, 0x75, 0x50, 0x73, 0x84, 0xfb, 0x8d, 0xf2, 0x15, 0x65, - 0xb9, 0xdc, 0xb9, 0x20, 0x50, 0xb5, 0x20, 0x2c, 0x14, 0x22, 0x5a, 0x4f, 0x14, 0xb0, 0x90, 0x8e, - 0x7b, 0x53, 0x73, 0x29, 0xfc, 0x69, 0x2a, 
0x76, 0xb5, 0x58, 0xec, 0xcc, 0x9a, 0x47, 0x1e, 0x0e, - 0x1c, 0xb4, 0xc4, 0xe2, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0x6d, 0x94, 0xae, 0x94, 0x97, 0xa7, - 0x56, 0xaf, 0xa9, 0x39, 0x09, 0xac, 0xa6, 0xbd, 0xeb, 0x4c, 0x0b, 0xde, 0xea, 0x3d, 0xc6, 0x80, - 0x7c, 0xa2, 0xd6, 0xaf, 0x4b, 0xa0, 0xbe, 0x81, 0x89, 0x61, 0x99, 0x7b, 0x84, 0x9e, 0xc1, 0xca, - 0xdd, 0x05, 0x15, 0xd7, 0x26, 0x5d, 0xb1, 0x72, 0x57, 0x73, 0x03, 0x08, 0x7d, 0xda, 0xb3, 0x49, - 0x37, 0x5a, 0x32, 0xf6, 0x0b, 0x71, 0x06, 0xb8, 0x0b, 0x26, 0x5c, 0x8a, 0xa9, 0xe7, 0xf2, 0x05, - 0x9b, 0x5a, 0x5d, 0x2e, 0xc0, 0xc5, 0xf1, 0x9d, 0x19, 0xc1, 0x36, 0xe1, 0xff, 0x46, 0x82, 0xa7, - 0xf5, 0x8f, 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0x7b, 0x1a, 0x65, 0xe9, 0x7c, 0x0b, 0x54, 0xe8, - 0xd0, 0x26, 0x7c, 0x42, 0xea, 0x9d, 0xab, 0x81, 0x2b, 0xf7, 0x87, 0x36, 0xf9, 0xe2, 0x78, 0x69, - 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x9b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, 0xba, 0x3c, 0xf4, - 0x17, 0xc7, 0x4b, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x01, 0x80, 0x3a, 0x76, 0xe9, - 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, 0x8a, 0x59, 0x74, - 0x2e, 0x09, 0x2f, 0xe0, 0x66, 0x8a, 0x0d, 0x65, 0x8c, 0x00, 0xaf, 0x82, 0x09, 0x87, 0x60, 0xd7, - 0x32, 0x1b, 0x15, 0x1e, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x5f, 0x01, 0x93, 0x06, - 0x71, 0x5d, 0xdc, 0x27, 0x8d, 0x2a, 0x07, 0xce, 0x0a, 0xe0, 0xe4, 0x96, 0xdf, 0x8c, 0x82, 0xfe, - 0xd6, 0x1f, 0x15, 0x30, 0x1d, 0xce, 0xdc, 0x19, 0xec, 0x9c, 0x3b, 0xf2, 0xce, 0x69, 0x8d, 0x4f, - 0x96, 0x9c, 0x0d, 0xf3, 0x71, 0x39, 0xe6, 0x38, 0x4b, 0x47, 0xf8, 0x33, 0x50, 0x73, 0x89, 0x4e, - 0xba, 0xd4, 0x72, 0x84, 0xe3, 0xaf, 0x15, 0x74, 0x1c, 0x1f, 0x10, 0x7d, 0x4f, 0x98, 0x76, 0xce, - 0x33, 0xcf, 0x83, 0x5f, 0x28, 0xa4, 0x84, 0xef, 0x81, 0x1a, 0x25, 0x86, 0xad, 0x63, 0x4a, 0xc4, - 0xae, 0x79, 0x31, 0xee, 0x3c, 0xcb, 0x19, 0x46, 0xb6, 0x6b, 0xf5, 0xee, 0x0b, 0x18, 0xdf, 0x32, - 0xe1, 0x64, 0x04, 0xad, 0x28, 0xa4, 0x81, 0x36, 0x98, 0xf1, 0xec, 0x1e, 0x43, 0x52, 0x76, 0x9c, - 0xf7, 0x87, 0x22, 0x87, 0x6e, 0x8c, 0x9f, 0x95, 0x7d, 0xc9, 0xae, 0xb3, 0x20, 0x46, 0x99, 0x91, - 0xdb, 0x51, 0x82, 0x1f, 0xae, 0x81, 0x59, 0x43, 0x33, 0x11, 0xc1, 0xbd, 0xe1, 0x1e, 0xe9, 0x5a, - 0x66, 0xcf, 0xe5, 0xa9, 0x54, 0xed, 0x2c, 0x0a, 0x82, 0xd9, 0x2d, 0xb9, 0x1b, 0x25, 0xf1, 0x70, - 0x13, 0xcc, 0x07, 0x07, 0xf0, 0x5d, 0xcd, 0xa5, 0x96, 0x33, 0xdc, 0xd4, 0x0c, 0x8d, 0x36, 0x26, - 0x38, 0x4f, 0x63, 0x74, 0xbc, 0x34, 0x8f, 0x32, 0xfa, 0x51, 0xa6, 0x55, 0xeb, 0x77, 0x13, 0x60, - 0x36, 0x71, 0x2e, 0xc0, 0x07, 0x60, 0xa1, 0xeb, 0x39, 0x0e, 0x31, 0xe9, 0xb6, 0x67, 0x1c, 0x10, - 0x67, 0xaf, 0x7b, 0x48, 0x7a, 0x9e, 0x4e, 0x7a, 0x7c, 0x59, 0xab, 0x9d, 0xa6, 0xf0, 0x75, 0x61, - 0x3d, 0x13, 0x85, 0x72, 0xac, 0xe1, 0xbb, 0x00, 0x9a, 0xbc, 0x69, 0x4b, 0x73, 0xdd, 0x90, 0xb3, - 0xc4, 0x39, 0xc3, 0xad, 0xb8, 0x9d, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x8f, 0xb8, 0x9a, 0x43, - 0x7a, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x1b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x00, 0x53, 0xfe, - 0x68, 0x7c, 0xce, 0xc5, 0xe2, 0xcc, 0x09, 0xb2, 0xa9, 0xed, 0xa8, 0x0b, 0xc5, 0x71, 0x2c, 0x34, - 0xeb, 0xc0, 0x25, 0xce, 0x80, 0xf4, 0xee, 0xf8, 0xe2, 0x80, 0x55, 0xd0, 0x2a, 0xaf, 0xa0, 0x61, - 0x68, 0x3b, 0x29, 0x04, 0xca, 0xb0, 0x62, 0xa1, 0xf9, 0x59, 0x93, 0x0a, 0x6d, 0x42, 0x0e, 0x6d, - 0x3f, 0x13, 0x85, 0x72, 0xac, 0x59, 0xee, 0xf9, 0x2e, 0xaf, 0x0d, 0xb0, 0xa6, 0xe3, 0x03, 0x9d, - 0x34, 0x26, 0xe5, 0xdc, 0xdb, 0x96, 0xbb, 0x51, 0x12, 0x0f, 0xef, 0x80, 0x8b, 0x7e, 0xd3, 0xbe, - 0x89, 0x43, 0x92, 0x1a, 0x27, 0x79, 0x41, 0x90, 0x5c, 0xdc, 0x4e, 
0x02, 0x50, 0xda, 0x06, 0xde, - 0x02, 0x33, 0x5d, 0x4b, 0xd7, 0x79, 0x3e, 0xae, 0x5b, 0x9e, 0x49, 0x1b, 0x75, 0xce, 0x02, 0xd9, - 0x1e, 0x5a, 0x97, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x0e, 0x40, 0x37, 0x28, 0x0c, 0x6e, 0x03, 0x8c, - 0x51, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xd6, 0xc7, 0x0a, 0x58, - 0xcc, 0xd9, 0xe8, 0xf0, 0x87, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, 0xcc, 0x62, 0x95, - 0xf0, 0x10, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x7d, 0x1f, 0x22, 0xce, 0xb2, 0x76, 0x6e, 0x00, 0x28, - 0x8e, 0x8e, 0x4e, 0xe5, 0x8b, 0xa3, 0xe3, 0xa5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, 0xad, 0xdf, 0x94, - 0x00, 0xd8, 0x20, 0xb6, 0x6e, 0x0d, 0x0d, 0x62, 0x9e, 0x85, 0xa6, 0xb9, 0x27, 0x69, 0x9a, 0x97, - 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, 0x21, 0x7b, 0xb6, - 0xaa, 0xf9, 0xb4, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, 0x72, 0x62, 0x45, - 0x17, 0x33, 0x4c, 0xbe, 0x32, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, 0x3f, 0xae, 0x69, - 0x26, 0x4e, 0xac, 0x69, 0xc2, 0x4a, 0xb4, 0x29, 0x31, 0xa1, 0x04, 0x73, 0x8e, 0x86, 0x9a, 0xfc, - 0x26, 0x6a, 0xa8, 0x3f, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x33, 0x10, 0x51, 0x77, 0x65, 0x11, 0xf5, - 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x9f, 0x56, 0xe2, 0xae, 0x73, 0x19, 0xb5, 0xcc, 0xae, 0x60, - 0xb6, 0xae, 0x75, 0xb1, 0x2b, 0xea, 0xed, 0x79, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, 0xb0, 0x57, 0x12, - 0x5c, 0xa5, 0xaf, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x09, 0xa8, 0xb9, 0x81, 0xd4, 0xaa, - 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, 0xba, 0x2c, 0x65, - 0x55, 0xfd, 0x3a, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf1, 0x4d, 0x55, 0x8b, 0x12, - 0x7d, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0xa2, 0xed, 0x58, 0x7d, 0x87, 0xb8, 0xee, 0x06, - 0xc1, 0x3d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x8f, 0x8e, 0x97, 0x16, 0x77, 0xb3, - 0x21, 0x28, 0xcf, 0xb6, 0xf5, 0x97, 0x0a, 0xb8, 0x90, 0x3c, 0x1b, 0x73, 0x64, 0x8a, 0x72, 0x2a, - 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, 0x0d, 0xcc, 0x0a, - 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0xbe, 0xdc, 0x8d, 0x92, 0x78, 0x78, 0x1b, 0x4c, - 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, 0x9d, 0x48, 0xc6, - 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xb5, 0x24, 0x00, 0xa5, 0x6d, 0xe0, - 0x16, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x7e, 0x1a, 0x82, 0xb2, - 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x4c, 0xf0, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, 0x58, 0xcd, 0x64, - 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfa, 0x48, 0x01, 0x30, 0xbd, 0x0f, 0xc7, 0xbe, 0x04, 0xa4, - 0x2c, 0x62, 0x15, 0x53, 0xcb, 0xd6, 0x3f, 0x37, 0x0a, 0xea, 0x9f, 0xe8, 0x40, 0x2d, 0x26, 0x80, - 0xc4, 0x44, 0x9f, 0xcd, 0xa3, 0x4e, 0x51, 0x01, 0x14, 0x39, 0xf5, 0x1c, 0x04, 0x50, 0x8c, 0xec, - 0xd9, 0x02, 0xe8, 0x9f, 0x25, 0x30, 0x17, 0x81, 0x0b, 0x0b, 0xa0, 0x0c, 0x93, 0xef, 0x1e, 0x76, - 0x8a, 0x89, 0x92, 0x68, 0xea, 0xfe, 0x97, 0x44, 0x49, 0xe4, 0x55, 0x8e, 0x28, 0xf9, 0x43, 0x29, - 0xee, 0xfa, 0x09, 0x45, 0xc9, 0x73, 0x78, 0xe1, 0xf8, 0xc6, 0xe9, 0x9a, 0xd6, 0x5f, 0xcb, 0xe0, - 0x42, 0x72, 0x1f, 0x4a, 0x05, 0x52, 0x19, 0x5b, 0x20, 0x77, 0xc1, 0xfc, 0x23, 0x4f, 0xd7, 0x87, - 0x3c, 0x86, 0x58, 0x95, 0xf4, 0x4b, 0xeb, 0xff, 0x0b, 0xcb, 0xf9, 0x1f, 0x67, 0x60, 0x50, 0xa6, - 0x65, 0xba, 0x5e, 0x56, 0xbe, 0x6c, 0xbd, 0xac, 0x9e, 0xa2, 0x5e, 0x66, 0x4b, 0x8e, 0xf2, 0xa9, - 0x24, 0xc7, 0xc9, 0x8a, 0x65, 0xc6, 0xc1, 0x35, 0xf6, 0xea, 0x3f, 0x52, 0xc0, 0x42, 0xf6, 
0x85, - 0x1b, 0xea, 0x60, 0xc6, 0xc0, 0x8f, 0xe3, 0x0f, 0x1f, 0xe3, 0x8a, 0x88, 0x47, 0x35, 0x5d, 0xf5, - 0x3f, 0x19, 0xa9, 0xf7, 0x4c, 0xba, 0xe3, 0xec, 0x51, 0x47, 0x33, 0xfb, 0x7e, 0xe5, 0xdd, 0x92, - 0xb8, 0x50, 0x82, 0x1b, 0xbe, 0x0f, 0x6a, 0x06, 0x7e, 0xbc, 0xe7, 0x39, 0xfd, 0xac, 0x0a, 0x59, - 0x6c, 0x1c, 0xbe, 0x01, 0xb6, 0x04, 0x0b, 0x0a, 0xf9, 0x5a, 0x9f, 0x2b, 0x60, 0x31, 0xa7, 0xaa, - 0x7e, 0x8b, 0xa2, 0xdc, 0x01, 0x57, 0xa4, 0x20, 0xd9, 0xae, 0x24, 0x8f, 0x3c, 0x9d, 0x6f, 0x50, - 0x21, 0x64, 0xae, 0x81, 0xba, 0x8d, 0x1d, 0xaa, 0x85, 0x32, 0xb8, 0xda, 0x99, 0x1e, 0x1d, 0x2f, - 0xd5, 0x77, 0x83, 0x46, 0x14, 0xf5, 0xb7, 0xfe, 0xa3, 0x80, 0xea, 0x5e, 0x17, 0xeb, 0xe4, 0x0c, - 0x94, 0xc4, 0x86, 0xa4, 0x24, 0xf2, 0x5f, 0xe9, 0xb9, 0x3f, 0xb9, 0x22, 0x62, 0x33, 0x21, 0x22, - 0x5e, 0x1a, 0xc3, 0xf3, 0x6c, 0xfd, 0xf0, 0x36, 0xa8, 0x87, 0xc3, 0x9d, 0xec, 0x70, 0x6b, 0xfd, - 0xbe, 0x04, 0xa6, 0x62, 0x43, 0x9c, 0xf0, 0x68, 0x7c, 0x28, 0xd5, 0x03, 0xb6, 0xe9, 0x57, 0x8b, - 0x04, 0xa2, 0x06, 0x67, 0xff, 0x3b, 0x26, 0x75, 0xe2, 0x97, 0xc7, 0x74, 0x49, 0xf8, 0x01, 0x98, - 0xa1, 0xd8, 0xe9, 0x13, 0x1a, 0xf4, 0xf1, 0x09, 0xab, 0x47, 0x8f, 0x29, 0xf7, 0xa5, 0x5e, 0x94, - 0x40, 0x5f, 0xba, 0x0d, 0xa6, 0xa5, 0xc1, 0xe0, 0x05, 0x50, 0x3e, 0x22, 0x43, 0x5f, 0x52, 0x21, - 0xf6, 0x27, 0x9c, 0x07, 0xd5, 0x01, 0xd6, 0x3d, 0x3f, 0xcf, 0xeb, 0xc8, 0xff, 0x71, 0xab, 0xf4, - 0x96, 0xd2, 0xfa, 0x2d, 0x9b, 0x9c, 0x28, 0x39, 0xcf, 0x20, 0xbb, 0xde, 0x95, 0xb2, 0x2b, 0xff, - 0x83, 0x61, 0x7c, 0xcb, 0xe4, 0xe5, 0x18, 0x4a, 0xe4, 0xd8, 0xab, 0x85, 0xd8, 0x9e, 0x9d, 0x69, - 0xff, 0x2a, 0x81, 0xf9, 0x18, 0x3a, 0x92, 0xaa, 0xdf, 0x97, 0xa4, 0xea, 0x72, 0x42, 0xaa, 0x36, - 0xb2, 0x6c, 0xbe, 0xd3, 0xaa, 0xe3, 0xb5, 0xea, 0x9f, 0x15, 0x30, 0x1b, 0x9b, 0xbb, 0x33, 0x10, - 0xab, 0xf7, 0x64, 0xb1, 0xfa, 0x52, 0x91, 0xa4, 0xc9, 0x51, 0xab, 0x1f, 0x4e, 0x48, 0xce, 0x7f, - 0xeb, 0xdf, 0xd0, 0x7e, 0x09, 0xe6, 0x07, 0x96, 0xee, 0x19, 0x64, 0x5d, 0xc7, 0x9a, 0x11, 0x00, - 0x98, 0xba, 0x2b, 0x27, 0xef, 0x89, 0x21, 0x3d, 0x71, 0x5c, 0xcd, 0xa5, 0xc4, 0xa4, 0x0f, 0x22, - 0xcb, 0x48, 0x53, 0x3e, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, 0xc0, 0x14, 0x53, 0x65, 0x5a, - 0x97, 0x6c, 0x63, 0x23, 0x48, 0xac, 0xf0, 0xf3, 0xd8, 0x5e, 0xd4, 0x85, 0xe2, 0x38, 0x78, 0x08, - 0xe6, 0x6c, 0xab, 0xb7, 0x85, 0x4d, 0xdc, 0x27, 0x4c, 0x66, 0xec, 0x5a, 0xba, 0xd6, 0x1d, 0xf2, - 0x87, 0xb5, 0x7a, 0xe7, 0xcd, 0xe0, 0xd1, 0x64, 0x37, 0x0d, 0x61, 0x17, 0xd0, 0x8c, 0x66, 0xbe, - 0xa9, 0xb3, 0x28, 0xa1, 0x93, 0xfa, 0xa4, 0xeb, 0x3f, 0x69, 0xaf, 0x16, 0xc9, 0xb0, 0x53, 0x7e, - 0xd4, 0xcd, 0x7b, 0x37, 0xac, 0x9d, 0xea, 0xdd, 0x30, 0xe3, 0x02, 0x55, 0x3f, 0xd9, 0x05, 0xaa, - 0xf5, 0x51, 0x15, 0x5c, 0x4c, 0x9d, 0xb6, 0x5f, 0xe3, 0xe3, 0x5f, 0xea, 0x26, 0x52, 0x3e, 0xc1, - 0x4d, 0x64, 0x0d, 0xcc, 0x8a, 0xef, 0xc9, 0x89, 0x8b, 0x4c, 0x38, 0x1f, 0xeb, 0x72, 0x37, 0x4a, - 0xe2, 0xb3, 0x1e, 0x1f, 0xab, 0x27, 0x7c, 0x7c, 0x8c, 0x7b, 0x21, 0xfe, 0x3f, 0xca, 0xcf, 0xde, - 0xb4, 0x17, 0xe2, 0xdf, 0xa4, 0x92, 0x78, 0x26, 0x32, 0x7c, 0xd6, 0x90, 0x61, 0x52, 0x16, 0x19, - 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0xbf, 0xd4, 0x37, 0x53, 0x9c, 0xf1, 0xcd, 0x74, 0xa5, 0xc8, 0x96, - 0x28, 0xfe, 0xce, 0x98, 0x79, 0x63, 0x9c, 0x3a, 0xf9, 0x8d, 0xb1, 0xf5, 0x37, 0x05, 0xbc, 0x90, - 0xbb, 0x29, 0xe1, 0x9a, 0x24, 0x01, 0x56, 0x12, 0x12, 0xe0, 0x7b, 0xb9, 0x86, 0x31, 0x1d, 0xe0, - 0x64, 0x3f, 0x41, 0xbe, 0x5d, 0xec, 0x09, 0x32, 0xe3, 0x1e, 0x31, 0xfe, 0x2d, 0xb2, 0xb3, 0xf2, - 0xe4, 0x69, 0xf3, 0xdc, 0x27, 0x4f, 0x9b, 0xe7, 0x3e, 0x7b, 0xda, 0x3c, 0xf7, 0xab, 0x51, 0x53, - 0x79, 0x32, 0x6a, 
0x2a, 0x9f, 0x8c, 0x9a, 0xca, 0x67, 0xa3, 0xa6, 0xf2, 0xf7, 0x51, 0x53, 0xf9, - 0xf0, 0xf3, 0xe6, 0xb9, 0xf7, 0x27, 0xc5, 0x88, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xda, 0x5d, - 0xee, 0xc9, 0xd4, 0x29, 0x00, 0x00, + // 2284 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xf7, 0xf2, 0x87, 0x44, 0x8e, 0x2c, 0xc9, 0x1e, 0xe9, 0x2b, 0x31, 0xf2, 0xb7, 0xa4, 0xb1, + 0x31, 0x1c, 0x25, 0xb6, 0x48, 0x5b, 0xf9, 0x81, 0xc4, 0x2e, 0x92, 0x8a, 0x52, 0x6a, 0x3b, 0xd0, + 0x0f, 0x66, 0x64, 0x39, 0x68, 0xd0, 0x1f, 0x1e, 0x91, 0x63, 0x6a, 0xa3, 0xe5, 0xee, 0x62, 0x77, + 0x96, 0x31, 0xd1, 0x4b, 0xaf, 0x05, 0x0a, 0xb4, 0xbd, 0xf6, 0x9f, 0xe8, 0xad, 0x28, 0x9a, 0x5b, + 0x11, 0x04, 0x3e, 0x06, 0xbd, 0x24, 0xbd, 0x10, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x81, + 0x02, 0xc5, 0xcc, 0xce, 0xfe, 0xde, 0x35, 0x97, 0x8a, 0xa3, 0x34, 0x41, 0x6e, 0xe2, 0xbc, 0xf7, + 0x3e, 0xf3, 0xde, 0xcc, 0x7b, 0xf3, 0x3e, 0x33, 0x2b, 0xf0, 0x83, 0xe3, 0xd7, 0xad, 0xba, 0xa2, + 0x37, 0x8e, 0xed, 0x43, 0x62, 0x6a, 0x84, 0x12, 0xab, 0xd1, 0x27, 0x5a, 0x47, 0x37, 0x1b, 0x42, + 0x80, 0x0d, 0xa5, 0x81, 0x0d, 0xc3, 0x6a, 0xf4, 0xaf, 0x1f, 0x12, 0x8a, 0xd7, 0x1b, 0x5d, 0xa2, + 0x11, 0x13, 0x53, 0xd2, 0xa9, 0x1b, 0xa6, 0x4e, 0x75, 0xb8, 0xec, 0x28, 0xd6, 0xb1, 0xa1, 0xd4, + 0x99, 0x62, 0x5d, 0x28, 0xae, 0xac, 0x75, 0x15, 0x7a, 0x64, 0x1f, 0xd6, 0xdb, 0x7a, 0xaf, 0xd1, + 0xd5, 0xbb, 0x7a, 0x83, 0xeb, 0x1f, 0xda, 0x0f, 0xf8, 0x2f, 0xfe, 0x83, 0xff, 0xe5, 0xe0, 0xac, + 0xc8, 0x81, 0x09, 0xdb, 0xba, 0x49, 0x1a, 0xfd, 0xeb, 0xd1, 0xb9, 0x56, 0x5e, 0xf1, 0x75, 0x7a, + 0xb8, 0x7d, 0xa4, 0x68, 0xc4, 0x1c, 0x34, 0x8c, 0xe3, 0x2e, 0x1b, 0xb0, 0x1a, 0x3d, 0x42, 0x71, + 0x92, 0x55, 0x23, 0xcd, 0xca, 0xb4, 0x35, 0xaa, 0xf4, 0x48, 0xcc, 0xe0, 0xb5, 0x71, 0x06, 0x56, + 0xfb, 0x88, 0xf4, 0x70, 0xcc, 0xee, 0xe5, 0x34, 0x3b, 0x9b, 0x2a, 0x6a, 0x43, 0xd1, 0xa8, 0x45, + 0xcd, 0xa8, 0x91, 0xfc, 0x6f, 0x09, 0xc0, 0x4d, 0x5d, 0xa3, 0xa6, 0xae, 0xaa, 0xc4, 0x44, 0xa4, + 0xaf, 0x58, 0x8a, 0xae, 0xc1, 0xfb, 0xa0, 0xc4, 0xe2, 0xe9, 0x60, 0x8a, 0x2b, 0xd2, 0x45, 0x69, + 0x75, 0x66, 0xfd, 0x5a, 0xdd, 0x5f, 0x69, 0x0f, 0xbe, 0x6e, 0x1c, 0x77, 0xd9, 0x80, 0x55, 0x67, + 0xda, 0xf5, 0xfe, 0xf5, 0xfa, 0xde, 0xe1, 0x07, 0xa4, 0x4d, 0x77, 0x08, 0xc5, 0x4d, 0xf8, 0x68, + 0x58, 0x3b, 0x33, 0x1a, 0xd6, 0x80, 0x3f, 0x86, 0x3c, 0x54, 0xb8, 0x07, 0x0a, 0x1c, 0x3d, 0xc7, + 0xd1, 0xd7, 0x52, 0xd1, 0x45, 0xd0, 0x75, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x1a, 0x73, 0xaf, + 0x79, 0x56, 0x40, 0x17, 0xb6, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xab, 0xa0, 0x64, 0x0a, 0xf7, 0x2b, + 0xf9, 0x8b, 0xd2, 0x6a, 0xbe, 0x79, 0x4e, 0x68, 0x95, 0xdc, 0xb0, 0x90, 0xa7, 0x21, 0x3f, 0x92, + 0xc0, 0x52, 0x3c, 0xee, 0x6d, 0xc5, 0xa2, 0xf0, 0xc7, 0xb1, 0xd8, 0xeb, 0xd9, 0x62, 0x67, 0xd6, + 0x3c, 0x72, 0x6f, 0x62, 0x77, 0x24, 0x10, 0x77, 0x0b, 0x14, 0x15, 0x4a, 0x7a, 0x56, 0x25, 0x77, + 0x31, 0xbf, 0x3a, 0xb3, 0x7e, 0xa5, 0x9e, 0x92, 0xc0, 0xf5, 0xb8, 0x77, 0xcd, 0x59, 0x81, 0x5b, + 0xbc, 0xc3, 0x10, 0x90, 0x03, 0x24, 0xff, 0x32, 0x07, 0xca, 0x5b, 0x98, 0xf4, 0x74, 0x6d, 0x9f, + 0xd0, 0x53, 0xd8, 0xb9, 0xdb, 0xa0, 0x60, 0x19, 0xa4, 0x2d, 0x76, 0xee, 0x72, 0x6a, 0x00, 0x9e, + 0x4f, 0xfb, 0x06, 0x69, 0xfb, 0x5b, 0xc6, 0x7e, 0x21, 0x8e, 0x00, 0x5b, 0x60, 0xca, 0xa2, 0x98, + 0xda, 0x16, 0xdf, 0xb0, 0x99, 0xf5, 0xd5, 0x0c, 0x58, 0x5c, 0xbf, 0x39, 0x27, 0xd0, 0xa6, 0x9c, + 0xdf, 0x48, 0xe0, 0xc8, 0x7f, 0xcf, 0x01, 0xe8, 0xe9, 0x6e, 0xea, 0x5a, 0x47, 0xa1, 0x2c, 0x9d, + 0x6f, 0x80, 0x02, 0x1d, 0x18, 0x84, 0x2f, 0x48, 0xb9, 
0x79, 0xd9, 0x75, 0xe5, 0xee, 0xc0, 0x20, + 0x4f, 0x86, 0xb5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0xb7, 0x3d, 0x27, 0x73, 0xdc, 0xfa, + 0x95, 0xf0, 0xd4, 0x4f, 0x86, 0xb5, 0x84, 0xb3, 0xa3, 0xee, 0x21, 0x85, 0x1d, 0x84, 0x7d, 0x00, + 0x55, 0x6c, 0xd1, 0xbb, 0x26, 0xd6, 0x2c, 0x67, 0x26, 0xa5, 0x47, 0x44, 0xf8, 0x2f, 0x65, 0xdb, + 0x28, 0x66, 0xd1, 0x5c, 0x11, 0x5e, 0xc0, 0xed, 0x18, 0x1a, 0x4a, 0x98, 0x01, 0x5e, 0x06, 0x53, + 0x26, 0xc1, 0x96, 0xae, 0x55, 0x0a, 0x3c, 0x0a, 0x6f, 0x01, 0x11, 0x1f, 0x45, 0x42, 0x0a, 0x5f, + 0x04, 0xd3, 0x3d, 0x62, 0x59, 0xb8, 0x4b, 0x2a, 0x45, 0xae, 0x38, 0x2f, 0x14, 0xa7, 0x77, 0x9c, + 0x61, 0xe4, 0xca, 0xe5, 0x3f, 0x48, 0x60, 0xd6, 0x5b, 0xb9, 0x53, 0xa8, 0x9c, 0x5b, 0xe1, 0xca, + 0x91, 0xc7, 0x27, 0x4b, 0x4a, 0xc1, 0x7c, 0x9c, 0x0f, 0x38, 0xce, 0xd2, 0x11, 0xfe, 0x04, 0x94, + 0x2c, 0xa2, 0x92, 0x36, 0xd5, 0x4d, 0xe1, 0xf8, 0xcb, 0x19, 0x1d, 0xc7, 0x87, 0x44, 0xdd, 0x17, + 0xa6, 0xcd, 0xb3, 0xcc, 0x73, 0xf7, 0x17, 0xf2, 0x20, 0xe1, 0xbb, 0xa0, 0x44, 0x49, 0xcf, 0x50, + 0x31, 0x25, 0xa2, 0x6a, 0x9e, 0x0f, 0x3a, 0xcf, 0x72, 0x86, 0x81, 0xb5, 0xf4, 0xce, 0x5d, 0xa1, + 0xc6, 0x4b, 0xc6, 0x5b, 0x0c, 0x77, 0x14, 0x79, 0x30, 0xd0, 0x00, 0x73, 0xb6, 0xd1, 0x61, 0x9a, + 0x94, 0x1d, 0xe7, 0xdd, 0x81, 0xc8, 0xa1, 0x6b, 0xe3, 0x57, 0xe5, 0x20, 0x64, 0xd7, 0x5c, 0x12, + 0xb3, 0xcc, 0x85, 0xc7, 0x51, 0x04, 0x1f, 0x6e, 0x80, 0xf9, 0x9e, 0xa2, 0x21, 0x82, 0x3b, 0x83, + 0x7d, 0xd2, 0xd6, 0xb5, 0x8e, 0xc5, 0x53, 0xa9, 0xd8, 0x5c, 0x16, 0x00, 0xf3, 0x3b, 0x61, 0x31, + 0x8a, 0xea, 0xc3, 0x6d, 0xb0, 0xe8, 0x1e, 0xc0, 0xb7, 0x15, 0x8b, 0xea, 0xe6, 0x60, 0x5b, 0xe9, + 0x29, 0xb4, 0x32, 0xc5, 0x71, 0x2a, 0xa3, 0x61, 0x6d, 0x11, 0x25, 0xc8, 0x51, 0xa2, 0x95, 0xfc, + 0xdb, 0x29, 0x30, 0x1f, 0x39, 0x17, 0xe0, 0x3d, 0xb0, 0xd4, 0xb6, 0x4d, 0x93, 0x68, 0x74, 0xd7, + 0xee, 0x1d, 0x12, 0x73, 0xbf, 0x7d, 0x44, 0x3a, 0xb6, 0x4a, 0x3a, 0x7c, 0x5b, 0x8b, 0xcd, 0xaa, + 0xf0, 0x75, 0x69, 0x33, 0x51, 0x0b, 0xa5, 0x58, 0xc3, 0x77, 0x00, 0xd4, 0xf8, 0xd0, 0x8e, 0x62, + 0x59, 0x1e, 0x66, 0x8e, 0x63, 0x7a, 0xa5, 0xb8, 0x1b, 0xd3, 0x40, 0x09, 0x56, 0xcc, 0xc7, 0x0e, + 0xb1, 0x14, 0x93, 0x74, 0xa2, 0x3e, 0xe6, 0xc3, 0x3e, 0x6e, 0x25, 0x6a, 0xa1, 0x14, 0x6b, 0xf8, + 0x2a, 0x98, 0x71, 0x66, 0xe3, 0x6b, 0x2e, 0x36, 0x67, 0x41, 0x80, 0xcd, 0xec, 0xfa, 0x22, 0x14, + 0xd4, 0x63, 0xa1, 0xe9, 0x87, 0x16, 0x31, 0xfb, 0xa4, 0x73, 0xcb, 0x21, 0x07, 0xac, 0x83, 0x16, + 0x79, 0x07, 0xf5, 0x42, 0xdb, 0x8b, 0x69, 0xa0, 0x04, 0x2b, 0x16, 0x9a, 0x93, 0x35, 0xb1, 0xd0, + 0xa6, 0xc2, 0xa1, 0x1d, 0x24, 0x6a, 0xa1, 0x14, 0x6b, 0x96, 0x7b, 0x8e, 0xcb, 0x1b, 0x7d, 0xac, + 0xa8, 0xf8, 0x50, 0x25, 0x95, 0xe9, 0x70, 0xee, 0xed, 0x86, 0xc5, 0x28, 0xaa, 0x0f, 0x6f, 0x81, + 0xf3, 0xce, 0xd0, 0x81, 0x86, 0x3d, 0x90, 0x12, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdf, 0x8d, 0x2a, + 0xa0, 0xb8, 0x0d, 0xbc, 0x01, 0xe6, 0xda, 0xba, 0xaa, 0xf2, 0x7c, 0xdc, 0xd4, 0x6d, 0x8d, 0x56, + 0xca, 0x1c, 0x05, 0xb2, 0x1a, 0xda, 0x0c, 0x49, 0x50, 0x44, 0x13, 0xfe, 0x0c, 0x80, 0xb6, 0xdb, + 0x18, 0xac, 0x0a, 0x18, 0xc3, 0x00, 0xe2, 0x6d, 0xc9, 0xef, 0xcc, 0xde, 0x90, 0x85, 0x02, 0x90, + 0xf2, 0xc7, 0x12, 0x58, 0x4e, 0x29, 0x74, 0xf8, 0x56, 0xa8, 0x09, 0x5e, 0x89, 0x34, 0xc1, 0x0b, + 0x29, 0x66, 0x81, 0x4e, 0x78, 0x04, 0x66, 0x19, 0x21, 0x51, 0xb4, 0xae, 0xa3, 0x22, 0xce, 0xb2, + 0x46, 0x6a, 0x00, 0x28, 0xa8, 0xed, 0x9f, 0xca, 0xe7, 0x47, 0xc3, 0xda, 0x6c, 0x48, 0x86, 0xc2, + 0xc0, 0xf2, 0xaf, 0x72, 0x00, 0x6c, 0x11, 0x43, 0xd5, 0x07, 0x3d, 0xa2, 0x9d, 0x06, 0xa7, 0xb9, + 0x13, 0xe2, 0x34, 0x2f, 0xa4, 0x6f, 0x89, 0xe7, 0x54, 0x2a, 0xa9, 0x79, 0x37, 
0x42, 0x6a, 0x5e, + 0xcc, 0x02, 0xf6, 0x74, 0x56, 0xf3, 0x59, 0x1e, 0x2c, 0xf8, 0xca, 0x3e, 0xad, 0xb9, 0x19, 0xda, + 0xd1, 0x17, 0x22, 0x3b, 0xba, 0x9c, 0x60, 0xf2, 0x95, 0xf1, 0x9a, 0x0f, 0xc0, 0x1c, 0x63, 0x1d, + 0xce, 0xfe, 0x71, 0x4e, 0x33, 0x35, 0x31, 0xa7, 0xf1, 0x3a, 0xd1, 0x76, 0x08, 0x09, 0x45, 0x90, + 0x53, 0x38, 0xd4, 0xf4, 0x37, 0x91, 0x43, 0xfd, 0x51, 0x02, 0x73, 0xfe, 0x36, 0x9d, 0x02, 0x89, + 0xba, 0x1d, 0x26, 0x51, 0xcf, 0x67, 0x48, 0xce, 0x14, 0x16, 0xf5, 0x59, 0x21, 0xe8, 0x3a, 0xa7, + 0x51, 0xab, 0xec, 0x0a, 0x66, 0xa8, 0x4a, 0x1b, 0x5b, 0xa2, 0xdf, 0x9e, 0x75, 0xae, 0x5f, 0xce, + 0x18, 0xf2, 0xa4, 0x21, 0xc2, 0x95, 0xfb, 0x6a, 0x09, 0x57, 0xfe, 0xd9, 0x10, 0xae, 0x1f, 0x81, + 0x92, 0xe5, 0x52, 0xad, 0x02, 0x87, 0xbc, 0x92, 0xa9, 0xb0, 0x05, 0xcb, 0xf2, 0xa0, 0x3d, 0x7e, + 0xe5, 0xc1, 0x25, 0x31, 0xab, 0xe2, 0xd7, 0xc9, 0xac, 0x58, 0xa2, 0x1b, 0xd8, 0xb6, 0x48, 0x87, + 0x17, 0x55, 0xc9, 0x4f, 0xf4, 0x16, 0x1f, 0x45, 0x42, 0x0a, 0x0f, 0xc0, 0xb2, 0x61, 0xea, 0x5d, + 0x93, 0x58, 0xd6, 0x16, 0xc1, 0x1d, 0x55, 0xd1, 0x88, 0x1b, 0x80, 0xd3, 0x13, 0x2f, 0x8c, 0x86, + 0xb5, 0xe5, 0x56, 0xb2, 0x0a, 0x4a, 0xb3, 0x95, 0xff, 0x5c, 0x00, 0xe7, 0xa2, 0x67, 0x63, 0x0a, + 0x4d, 0x91, 0x4e, 0x44, 0x53, 0xae, 0x06, 0xf2, 0xd4, 0xe1, 0x70, 0x81, 0xa7, 0x82, 0x58, 0xae, + 0x6e, 0x80, 0x79, 0x41, 0x4b, 0x5c, 0xa1, 0x20, 0x6a, 0xde, 0xf6, 0x1c, 0x84, 0xc5, 0x28, 0xaa, + 0x0f, 0x6f, 0x82, 0x59, 0x93, 0x33, 0x2f, 0x17, 0xc0, 0x61, 0x2f, 0xff, 0x27, 0x00, 0x66, 0x51, + 0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x28, 0x84, 0x99, 0xcb, 0x46, 0x54, + 0x01, 0xc5, 0x6d, 0xe0, 0x0e, 0x58, 0xb0, 0xb5, 0x38, 0x94, 0x93, 0x6b, 0x17, 0x04, 0xd4, 0xc2, + 0x41, 0x5c, 0x05, 0x25, 0xd9, 0xc1, 0xfb, 0x21, 0x32, 0x33, 0xc5, 0xcf, 0x93, 0xab, 0x19, 0x6a, + 0x22, 0x33, 0x9b, 0x49, 0xa0, 0x5a, 0xa5, 0xac, 0x54, 0x4b, 0xfe, 0x48, 0x02, 0x30, 0x5e, 0x87, + 0x63, 0x5f, 0x02, 0x62, 0x16, 0x81, 0x8e, 0xa9, 0x24, 0xf3, 0x9f, 0x6b, 0x19, 0xf9, 0x8f, 0x7f, + 0xa0, 0x66, 0x23, 0x40, 0x62, 0xa1, 0x4f, 0xe7, 0x51, 0x27, 0x2b, 0x01, 0xf2, 0x9d, 0x7a, 0x06, + 0x04, 0x28, 0x00, 0xf6, 0x74, 0x02, 0xf4, 0x8f, 0x1c, 0x58, 0xf0, 0x95, 0x33, 0x13, 0xa0, 0x04, + 0x93, 0xef, 0x1e, 0x76, 0xb2, 0x91, 0x12, 0x7f, 0xe9, 0xfe, 0x97, 0x48, 0x89, 0xef, 0x55, 0x0a, + 0x29, 0xf9, 0x7d, 0x2e, 0xe8, 0xfa, 0x84, 0xa4, 0xe4, 0x19, 0xbc, 0x70, 0x7c, 0xe3, 0x78, 0x8d, + 0xfc, 0x49, 0x1e, 0x9c, 0x8b, 0xd6, 0x61, 0xa8, 0x41, 0x4a, 0x63, 0x1b, 0x64, 0x0b, 0x2c, 0x3e, + 0xb0, 0x55, 0x75, 0xc0, 0x63, 0x08, 0x74, 0x49, 0xa7, 0xb5, 0xfe, 0xbf, 0xb0, 0x5c, 0xfc, 0x61, + 0x82, 0x0e, 0x4a, 0xb4, 0x8c, 0xf7, 0xcb, 0xc2, 0x97, 0xed, 0x97, 0xc5, 0x13, 0xf4, 0xcb, 0x64, + 0xca, 0x91, 0x3f, 0x11, 0xe5, 0x98, 0xac, 0x59, 0x26, 0x1c, 0x5c, 0x63, 0xaf, 0xfe, 0x23, 0x09, + 0x2c, 0x25, 0x5f, 0xb8, 0xa1, 0x0a, 0xe6, 0x7a, 0xf8, 0x61, 0xf0, 0xe1, 0x63, 0x5c, 0x13, 0xb1, + 0xa9, 0xa2, 0xd6, 0x9d, 0x4f, 0x46, 0xf5, 0x3b, 0x1a, 0xdd, 0x33, 0xf7, 0xa9, 0xa9, 0x68, 0x5d, + 0xa7, 0xf3, 0xee, 0x84, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x4a, 0x3d, 0xfc, 0x70, 0xdf, 0x36, + 0xbb, 0x49, 0x1d, 0x32, 0xdb, 0x3c, 0xbc, 0x00, 0x76, 0x04, 0x0a, 0xf2, 0xf0, 0xe4, 0x2f, 0x24, + 0xb0, 0x9c, 0xd2, 0x55, 0xbf, 0x45, 0x51, 0xee, 0x81, 0x8b, 0xa1, 0x20, 0x59, 0x55, 0x92, 0x07, + 0xb6, 0xca, 0x0b, 0x54, 0x10, 0x99, 0x2b, 0xa0, 0x6c, 0x60, 0x93, 0x2a, 0x1e, 0x0d, 0x2e, 0x36, + 0x67, 0x47, 0xc3, 0x5a, 0xb9, 0xe5, 0x0e, 0x22, 0x5f, 0x2e, 0xff, 0x47, 0x02, 0xc5, 0xfd, 0x36, + 0x56, 0xc9, 0x29, 0x30, 0x89, 0xad, 0x10, 0x93, 0x48, 0x7f, 0xa5, 0xe7, 0xfe, 0xa4, 0x92, 0x88, + 0xed, 
0x08, 0x89, 0xb8, 0x34, 0x06, 0xe7, 0xe9, 0xfc, 0xe1, 0x0d, 0x50, 0xf6, 0xa6, 0x9b, 0xec, + 0x70, 0x93, 0x7f, 0x97, 0x03, 0x33, 0x81, 0x29, 0x26, 0x3c, 0x1a, 0xef, 0x87, 0xfa, 0x01, 0x2b, + 0xfa, 0xf5, 0x2c, 0x81, 0xd4, 0xdd, 0xb3, 0xff, 0x6d, 0x8d, 0x9a, 0xc1, 0xcb, 0x63, 0xbc, 0x25, + 0xbc, 0x09, 0xe6, 0x28, 0x36, 0xbb, 0x84, 0xba, 0x32, 0xbe, 0x60, 0x65, 0xff, 0x31, 0xe5, 0x6e, + 0x48, 0x8a, 0x22, 0xda, 0x2b, 0x37, 0xc1, 0x6c, 0x68, 0x32, 0x78, 0x0e, 0xe4, 0x8f, 0xc9, 0xc0, + 0xa1, 0x54, 0x88, 0xfd, 0x09, 0x17, 0x41, 0xb1, 0x8f, 0x55, 0xdb, 0xc9, 0xf3, 0x32, 0x72, 0x7e, + 0xdc, 0xc8, 0xbd, 0x2e, 0xc9, 0xbf, 0x66, 0x8b, 0xe3, 0x27, 0xe7, 0x29, 0x64, 0xd7, 0x3b, 0xa1, + 0xec, 0x4a, 0xff, 0x60, 0x18, 0x2c, 0x99, 0xb4, 0x1c, 0x43, 0x91, 0x1c, 0x7b, 0x29, 0x13, 0xda, + 0xd3, 0x33, 0xed, 0x9f, 0x39, 0xb0, 0x18, 0xd0, 0xf6, 0xa9, 0xea, 0xf7, 0x43, 0x54, 0x75, 0x35, + 0x42, 0x55, 0x2b, 0x49, 0x36, 0xdf, 0x71, 0xd5, 0xf1, 0x5c, 0xf5, 0x4f, 0x12, 0x98, 0x0f, 0xac, + 0xdd, 0x29, 0x90, 0xd5, 0x3b, 0x61, 0xb2, 0x7a, 0x29, 0x4b, 0xd2, 0xa4, 0xb0, 0xd5, 0x7f, 0x49, + 0xa0, 0x11, 0xd0, 0x6a, 0x11, 0xd3, 0x52, 0x2c, 0x4a, 0x34, 0x7a, 0x4f, 0x57, 0xed, 0x1e, 0xd9, + 0x54, 0xb1, 0xd2, 0x43, 0x84, 0x0d, 0x28, 0xba, 0xd6, 0xd2, 0x55, 0xa5, 0x3d, 0x80, 0x18, 0xcc, + 0x7c, 0x78, 0x44, 0xb4, 0x2d, 0xa2, 0x12, 0x2a, 0x3e, 0x6b, 0x95, 0x9b, 0x6f, 0xb9, 0x5f, 0x79, + 0xde, 0xf3, 0x45, 0x4f, 0x86, 0xb5, 0xd5, 0x2c, 0x88, 0x3c, 0xcb, 0x82, 0x98, 0xf0, 0xa7, 0x00, + 0xb0, 0x9f, 0xfc, 0x3c, 0xea, 0x88, 0x84, 0x7b, 0xd3, 0xad, 0xca, 0xf7, 0x3c, 0xc9, 0x44, 0x13, + 0x04, 0x10, 0xe5, 0xbf, 0x4e, 0x87, 0xf6, 0xec, 0x5b, 0xff, 0x74, 0xf8, 0x73, 0xb0, 0xd8, 0xf7, + 0x57, 0xc7, 0x55, 0x60, 0xa4, 0x36, 0x1f, 0xbd, 0x1e, 0x7b, 0xf0, 0x49, 0xeb, 0xea, 0x53, 0xe9, + 0x7b, 0x09, 0x70, 0x28, 0x71, 0x12, 0xf8, 0x2a, 0x98, 0x61, 0x64, 0x54, 0x69, 0x93, 0x5d, 0xdc, + 0x73, 0xeb, 0xc9, 0xfb, 0x2a, 0xb8, 0xef, 0x8b, 0x50, 0x50, 0x0f, 0x1e, 0x81, 0x05, 0x43, 0xef, + 0xec, 0x60, 0x0d, 0x77, 0x09, 0x63, 0x57, 0xce, 0x56, 0xf2, 0xf7, 0xc4, 0x72, 0xf3, 0x35, 0xf7, + 0xad, 0xa8, 0x15, 0x57, 0x61, 0xf7, 0xee, 0x84, 0x61, 0x9e, 0x04, 0x49, 0x90, 0xd0, 0x8c, 0x7d, + 0xc9, 0x76, 0x5e, 0xf2, 0xd7, 0xb3, 0x14, 0xd6, 0x09, 0xbf, 0x65, 0xa7, 0x3d, 0x97, 0x96, 0x4e, + 0xf4, 0x5c, 0x9a, 0x70, 0x6f, 0x2c, 0x4f, 0x78, 0x6f, 0xfc, 0x44, 0x02, 0x97, 0x8c, 0x0c, 0xb5, + 0x54, 0x01, 0x7c, 0x6d, 0x6e, 0x67, 0x59, 0x9b, 0x2c, 0xb5, 0xd9, 0x5c, 0x1d, 0x0d, 0x6b, 0x97, + 0xb2, 0x68, 0xa2, 0x4c, 0xfe, 0xc9, 0x1f, 0x15, 0xc1, 0xf9, 0x58, 0xb7, 0xfc, 0x1a, 0x1f, 0x6f, + 0x63, 0x37, 0xc9, 0xfc, 0x04, 0x37, 0xc9, 0x0d, 0x30, 0x2f, 0xfe, 0x1f, 0x20, 0x72, 0x11, 0xf5, + 0x36, 0x76, 0x33, 0x2c, 0x46, 0x51, 0xfd, 0xa4, 0xc7, 0xe3, 0xe2, 0x84, 0x8f, 0xc7, 0x41, 0x2f, + 0xc4, 0xff, 0xb7, 0x39, 0x65, 0x18, 0xf7, 0x42, 0xfc, 0x9b, 0x5b, 0x54, 0x9f, 0x91, 0x44, 0x07, + 0xd5, 0x43, 0x98, 0x0e, 0x93, 0xc4, 0x83, 0x90, 0x14, 0x45, 0xb4, 0xbf, 0xd4, 0x37, 0x6f, 0x9c, + 0xf0, 0xcd, 0x7b, 0x2d, 0x4b, 0xfe, 0x66, 0x7f, 0x27, 0x4e, 0xbc, 0xf1, 0xcf, 0x4c, 0x7e, 0xe3, + 0x97, 0xff, 0x22, 0x81, 0xe7, 0x52, 0x4f, 0x17, 0xb8, 0x11, 0xa2, 0x70, 0x6b, 0x11, 0x0a, 0xf7, + 0xbd, 0x54, 0xc3, 0x00, 0x8f, 0x33, 0x93, 0x9f, 0x90, 0xdf, 0xc8, 0xf6, 0x84, 0x9c, 0x70, 0x0f, + 0x1c, 0xff, 0x96, 0xdc, 0x5c, 0x7b, 0xf4, 0xb8, 0x7a, 0xe6, 0xd3, 0xc7, 0xd5, 0x33, 0x9f, 0x3f, + 0xae, 0x9e, 0xf9, 0xc5, 0xa8, 0x2a, 0x3d, 0x1a, 0x55, 0xa5, 0x4f, 0x47, 0x55, 0xe9, 0xf3, 0x51, + 0x55, 0xfa, 0xdb, 0xa8, 0x2a, 0xfd, 0xe6, 0x8b, 0xea, 0x99, 0xf7, 0xa7, 0xc5, 0x8c, 0xff, 0x0d, + 0x00, 0x00, 0xff, 0xff, 0x3e, 
0x13, 0x3b, 0xc7, 0x94, 0x2b, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2523,6 +2560,39 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.WhenScaled) + copy(dAtA[i:], m.WhenScaled) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenScaled))) + i-- + dAtA[i] = 0x12 + i -= len(m.WhenDeleted) + copy(dAtA[i:], m.WhenDeleted) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenDeleted))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2543,6 +2613,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PersistentVolumeClaimRetentionPolicy != nil { + { + size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) i-- dAtA[i] = 0x48 @@ -3232,6 +3314,19 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WhenDeleted) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenScaled) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetSpec) Size() (n int) { if m == nil { return 0 @@ -3263,6 +3358,10 @@ func (m *StatefulSetSpec) Size() (n int) { n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.PersistentVolumeClaimRetentionPolicy != nil { + l = m.PersistentVolumeClaimRetentionPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3720,6 +3819,17 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetPersistentVolumeClaimRetentionPolicy{`, + `WhenDeleted:` + fmt.Sprintf("%v", this.WhenDeleted) + `,`, + `WhenScaled:` + fmt.Sprintf("%v", this.WhenScaled) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetSpec) String() string { if this == nil { return "nil" @@ -3739,6 +3849,7 @@ func (this *StatefulSetSpec) String() string { `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", 
"StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, `}`, }, "") return s @@ -8228,6 +8339,120 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetPersistentVolumeClaimRetentionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenDeleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenDeleted = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenScaled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenScaled = PersistentVolumeClaimRetentionPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8516,6 +8741,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaimRetentionPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.PersistentVolumeClaimRetentionPolicy == nil { + m.PersistentVolumeClaimRetentionPolicy = &StatefulSetPersistentVolumeClaimRetentionPolicy{} + } + if err := m.PersistentVolumeClaimRetentionPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index fb3f05ea2..6d9505865 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -169,8 +169,8 @@ message DaemonSetStatus { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 desiredNumberScheduled = 3; - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // Total number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition by passing the readinessProbe. optional int32 numberReady = 4; // The most recent generation observed by the daemon set controller. @@ -327,7 +327,7 @@ message DeploymentStatus { // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional optional int32 readyReplicas = 7; @@ -463,7 +463,7 @@ message ReplicaSetStatus { // +optional optional int32 fullyLabeledReplicas = 2; - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition. // +optional optional int32 readyReplicas = 4; @@ -656,6 +656,23 @@ message StatefulSetList { repeated StatefulSet items = 2; } +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +message StatefulSetPersistentVolumeClaimRetentionPolicy { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + optional string whenDeleted = 1; + + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. + optional string whenScaled = 2; +} + // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { // replicas is the desired number of replicas of the given Template. @@ -722,6 +739,12 @@ message StatefulSetSpec { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; } // StatefulSetStatus represents the current state of a StatefulSet. 
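For reference, a minimal sketch (not part of the vendored patch itself) of how the persistentVolumeClaimRetentionPolicy field introduced above might be populated through the apps/v1beta2 Go types that this diff vendors; the package alias and the printed output are illustrative only, and per the field comments the cluster-side behaviour assumes the alpha StatefulSetAutoDeletePVC feature gate is enabled:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// Delete PVCs created from volumeClaimTemplates when the StatefulSet is
	// deleted, but retain them on scale-down. The field is ignored unless the
	// alpha StatefulSetAutoDeletePVC feature gate is enabled on the cluster.
	policy := &appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1beta2.RetentionPersistentVolumeClaimRetentionPolicyType, // "Delete"
		WhenScaled:  appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType,    // "Retain"
	}

	spec := appsv1beta2.StatefulSetSpec{
		PersistentVolumeClaimRetentionPolicy: policy,
	}

	fmt.Printf("whenDeleted=%s whenScaled=%s\n",
		spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted,
		spec.PersistentVolumeClaimRetentionPolicy.WhenScaled)
}
```

With such a spec, PVCs stamped out from volumeClaimTemplates would be removed when the StatefulSet is deleted but kept across scale-downs, matching the `Delete`/`Retain` semantics documented in the StatefulSetPersistentVolumeClaimRetentionPolicy message comments above.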
@@ -734,7 +757,7 @@ message StatefulSetStatus { // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. optional int32 readyReplicas = 3; // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -766,9 +789,7 @@ message StatefulSetStatus { repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 67e10b971..332bc7ed8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -169,6 +169,40 @@ type RollingUpdateStatefulSetStrategy struct { Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` } +// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +// deleted or scaled down. +type PersistentVolumeClaimRetentionPolicyType string + +const ( + // RetainPersistentVolumeClaimRetentionPolicyType is the default + // PersistentVolumeClaimRetentionPolicy and specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will not be deleted. + RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" + // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates + // will be deleted in the scenario specified in + // StatefulSetPersistentVolumeClaimRetentionPolicy. + RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs +// created from the StatefulSet VolumeClaimTemplates. +type StatefulSetPersistentVolumeClaimRetentionPolicy struct { + // WhenDeleted specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is deleted. The default policy + // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The + // `Delete` policy causes those PVCs to be deleted. + WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` + // WhenScaled specifies what happens to PVCs created from StatefulSet + // VolumeClaimTemplates when the StatefulSet is scaled down. The default + // policy of `Retain` causes PVCs to not be affected by a scaledown. The + // `Delete` policy causes the associated PVCs for any excess pods above + // the replica count to be deleted. 
+ WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -235,6 +269,12 @@ type StatefulSetSpec struct { // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from + // the StatefulSet VolumeClaimTemplates. This requires the + // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // +optional + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -247,7 +287,7 @@ type StatefulSetStatus struct { // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version @@ -279,10 +319,8 @@ type StatefulSetStatus struct { Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. - // Remove omitempty when graduating to beta - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"` + // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } type StatefulSetConditionType string @@ -463,7 +501,7 @@ type DeploymentStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` @@ -656,8 +694,8 @@ type DaemonSetStatus struct { // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. + // Total number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running with a Ready Condition by passing the readinessProbe. 
NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` // The most recent generation observed by the daemon set controller. @@ -860,7 +898,7 @@ type ReplicaSetStatus struct { // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - // The number of ready replicas for this replica set. + // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 1f9912203..454c632dc 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -100,7 +100,7 @@ var map_DaemonSetStatus = map[string]string{ "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "numberReady": "Total number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition by passing the readinessProbe.", "observedGeneration": "The most recent generation observed by the daemon set controller.", "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", @@ -179,7 +179,7 @@ var map_DeploymentStatus = map[string]string{ "observedGeneration": "The generation observed by the deployment controller.", "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", + "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. 
They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", @@ -250,7 +250,7 @@ var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", + "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", @@ -351,17 +351,28 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ + "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", +} + +func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { + return map_StatefulSetPersistentVolumeClaimRetentionPolicy +} + var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -372,14 +383,14 @@ var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "readyReplicas": "readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.", "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 0a84d1b08..8293b9886 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -754,6 +755,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy. +func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy { + if in == nil { + return nil + } + out := new(StatefulSetPersistentVolumeClaimRetentionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = *in @@ -781,6 +798,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(int32) **out = **in } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go index 3368a1896..1ead8a4c1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index aca99c42b..2af533191 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index a5d82a810..059ec1a86 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index 9b5744db7..e448106e4 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index 1d11b38b0..f1d49eb38 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 58b2dfe75..13f09cf2d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go index fcb75dd91..f9817df58 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 93b1efb51..8e0a46525 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ 
b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -150,6 +150,7 @@ type ScaleStatus struct { // the types below are used in the alpha metrics annotation // MetricSourceType indicates the type of metric. +// +enum type MetricSourceType string const ( diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 05ae6ebda..603e6aa8c 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go new file mode 100644 index 000000000..f96a059b6 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go new file mode 100644 index 000000000..408413a78 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go @@ -0,0 +1,6599 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto + +package v2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } +func (*ContainerResourceMetricSource) ProtoMessage() {} +func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{0} +} +func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src) +} +func (m *ContainerResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo + +func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } +func (*ContainerResourceMetricStatus) ProtoMessage() {} +func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{1} +} +func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src) +} +func (m *ContainerResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{2} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{3} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{4} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } +func (*HPAScalingPolicy) ProtoMessage() {} +func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{5} +} +func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingPolicy.Merge(m, src) +} +func (m *HPAScalingPolicy) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo + +func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } +func (*HPAScalingRules) ProtoMessage() {} +func (*HPAScalingRules) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{6} +} +func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingRules) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingRules.Merge(m, src) +} +func (m *HPAScalingRules) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingRules) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingRules.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{7} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } +func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} +func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{8} +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{9} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{10} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var 
xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{11} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{12} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{13} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{14} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) 
XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{15} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{16} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo + +func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{17} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{18} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{19} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{20} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{21} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{22} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_b14d4df4b5f3935e, []int{23} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricSource") + proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricStatus") + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricStatus") + proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2.HPAScalingPolicy") + proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2.HPAScalingRules") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerBehavior") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2.MetricIdentifier") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2.MetricStatus") + proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2.MetricTarget") + proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2.MetricValueStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricStatus") +} + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_b14d4df4b5f3935e) +} + +var fileDescriptor_b14d4df4b5f3935e = []byte{ + // 1735 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x1b, 0xc7, + 0x19, 0xd7, 0x92, 0xd4, 0x6b, 0xa8, 0xe7, 0xf8, 0xc5, 0xca, 0x30, 0x29, 0x6c, 0xdd, 0xda, 0x75, + 0xeb, 0x65, 0xcd, 0xba, 0x86, 0x51, 0x9f, 0xb4, 0x72, 0x5b, 0x0b, 0x96, 0x2a, 0x7a, 0x68, 0x5b, + 0x6d, 0xd1, 0x16, 0x1e, 0xee, 0x8e, 0xa8, 0xa9, 0xc8, 0x5d, 0x62, 0x77, 0x49, 0x5b, 0x06, 0x0a, + 0xf4, 0xd2, 0x7b, 0xd1, 0xc2, 0x28, 0xfa, 0x3f, 0x18, 0x39, 0x25, 0x70, 0x0e, 0x09, 0x10, 0x20, + 0x39, 0xf8, 0x12, 0xc0, 0x87, 0x1c, 0x7c, 0x22, 0x62, 0x06, 0xc8, 0x31, 0x7f, 0x80, 0x4f, 0xc1, + 0x3c, 0xf6, 0xc9, 0x97, 0xe8, 0xc8, 0x02, 0x74, 0xe3, 0xcc, 0x7c, 0xdf, 0xef, 0x7b, 0xcc, 0xf7, + 0x9a, 0x25, 0xd0, 0xf7, 0x6f, 0xba, 0x1a, 0xb5, 0x8b, 0xfb, 0xad, 0x2a, 0x71, 0x2c, 0xe2, 0x11, + 0xb7, 0xd8, 0x26, 0x96, 0x69, 0x3b, 0x45, 0x79, 0x80, 0x9b, 0xb4, 0x88, 0x5b, 0x9e, 0xed, 0x1a, + 0xb8, 0x4e, 0xad, 0x5a, 0xb1, 0x5d, 0x2a, 0xd6, 0x88, 0x45, 0x1c, 0xec, 0x11, 0x53, 0x6b, 0x3a, + 0xb6, 0x67, 0xc3, 0x1f, 0x09, 0x52, 0x0d, 0x37, 0xa9, 0x16, 0x21, 0xd5, 0xda, 0xa5, 0x95, 0xab, + 0x35, 0xea, 0xed, 0xb5, 0xaa, 0x9a, 0x61, 0x37, 0x8a, 0x35, 0xbb, 0x66, 0x17, 0x39, 0x47, 0xb5, + 0xb5, 0xcb, 0x57, 0x7c, 0xc1, 0x7f, 0x09, 0xa4, 0x15, 0x35, 0x22, 0xd4, 0xb0, 0x1d, 0x52, 0x6c, + 0x5f, 0x4b, 0x4a, 0x5b, 0xb9, 0x1e, 0xd2, 0x34, 0xb0, 0xb1, 0x47, 0x2d, 0xe2, 0x1c, 0x14, 0x9b, + 0xfb, 0x35, 0xce, 0xe4, 0x10, 0xd7, 0x6e, 0x39, 0x06, 0x19, 0x8b, 0xcb, 0x2d, 0x36, 0x88, 0x87, + 0xfb, 0xc9, 0x2a, 0x0e, 0xe2, 0x72, 0x5a, 0x96, 0x47, 0x1b, 0xbd, 0x62, 0x6e, 0x8c, 0x62, 0x70, + 0x8d, 0x3d, 0xd2, 0xc0, 0x49, 0x3e, 0xf5, 0x5b, 0x05, 0x5c, 0x58, 0xb7, 0x2d, 0x0f, 0x33, 0x0e, + 0x24, 0x8d, 0xd8, 0x22, 0x9e, 0x43, 0x8d, 0x0a, 0xff, 0x0d, 0xd7, 0x41, 0xc6, 0xc2, 0x0d, 0x92, + 0x53, 0x56, 0x95, 0xcb, 0xb3, 0x7a, 0xf1, 0x65, 0xa7, 0x30, 0xd1, 0xed, 0x14, 0x32, 0x7f, 0xc0, + 0x0d, 0xf2, 0xb6, 0x53, 0x28, 0xf4, 0x3a, 0x4e, 0xf3, 0x61, 0x18, 0x09, 0xe2, 0xcc, 0x70, 0x1b, + 0x4c, 0x79, 0xd8, 0xa9, 0x11, 0x2f, 0x97, 0x5a, 0x55, 0x2e, 0x67, 0x4b, 0x97, 0xb4, 0x81, 0x57, + 0xa7, 0x09, 0xe9, 0xf7, 0x39, 0xb9, 0xbe, 0x20, 0xe5, 0x4d, 0x89, 0x35, 0x92, 0x30, 0xb0, 0x08, + 0x66, 0x0d, 0x5f, 0xed, 0x5c, 0x9a, 0xab, 0xb6, 0x2c, 0x49, 0x67, 0x43, 0x7b, 0x42, 0x1a, 0xf5, + 0xbb, 0x21, 0x86, 0x7a, 0xd8, 0x6b, 0xb9, 0x47, 0x63, 0xe8, 0x0e, 0x98, 0x36, 0x5a, 0x8e, 0x43, + 0x2c, 0xdf, 0xd2, 0x5f, 0x8c, 0xb4, 0xf4, 0x21, 0xae, 0xb7, 0x88, 0xd0, 0x41, 0x5f, 0x94, 0x52, + 0xa7, 0xd7, 0x05, 0x08, 0xf2, 0xd1, 0xc6, 0x37, 0xf8, 0x99, 0x02, 0xce, 0xaf, 0x3b, 0xb6, 0xeb, + 0x3e, 0x24, 0x8e, 0x4b, 0x6d, 0x6b, 0xbb, 0xfa, 0x77, 0x62, 0x78, 0x88, 0xec, 0x12, 0x87, 0x58, + 0x06, 0x81, 0xab, 0x20, 0xb3, 0x4f, 0x2d, 0x53, 0x9a, 0x3b, 0xe7, 0x9b, 0x7b, 0x97, 0x5a, 0x26, + 0xe2, 0x27, 0x8c, 0x82, 0x3b, 0x24, 0x15, 0xa7, 0x88, 0x58, 0x5b, 0x02, 0x00, 0x37, 0xa9, 0x14, + 0x20, 0xb5, 0x82, 0x92, 0x0e, 0xac, 0x95, 0x37, 0xe4, 0x09, 0x8a, 0x50, 0xa9, 0x9f, 0x28, 0xe0, + 0xf4, 0x6f, 0x9f, 0x78, 0xc4, 0xb1, 0x70, 0x3d, 0x16, 0x68, 0x15, 0x30, 0xd5, 0xe0, 0x6b, 0xae, + 0x52, 0xb6, 0xf4, 0xf3, 0x91, 0x9e, 0xdb, 0x30, 0x89, 0xe5, 0xd1, 0x5d, 0x4a, 0x9c, 0x30, 0x4e, + 0xc4, 0x09, 0x92, 0x50, 0x47, 0x1e, 0x78, 0xea, 0x17, 0xbd, 0xea, 0x8b, 0xf0, 0x79, 0x2f, 0xea, + 0xbf, 0xaf, 0x70, 0x52, 0x3f, 0x50, 0xc0, 0xd2, 0x9d, 0xf2, 0x5a, 0x45, 0x70, 0x97, 0xed, 0x3a, 
+ 0x35, 0x0e, 0xe0, 0x4d, 0x90, 0xf1, 0x0e, 0x9a, 0x7e, 0x06, 0x5c, 0xf4, 0x2f, 0xfc, 0xfe, 0x41, + 0x93, 0x65, 0xc0, 0xe9, 0x24, 0x3d, 0xdb, 0x47, 0x9c, 0x03, 0xfe, 0x18, 0x4c, 0xb6, 0x99, 0x5c, + 0xae, 0xe5, 0xa4, 0x3e, 0x2f, 0x59, 0x27, 0xb9, 0x32, 0x48, 0x9c, 0xc1, 0x5b, 0x60, 0xbe, 0x49, + 0x1c, 0x6a, 0x9b, 0x15, 0x62, 0xd8, 0x96, 0xe9, 0xf2, 0x80, 0x99, 0xd4, 0xcf, 0x48, 0xe2, 0xf9, + 0x72, 0xf4, 0x10, 0xc5, 0x69, 0xd5, 0xff, 0xa7, 0xc0, 0x62, 0xa8, 0x00, 0x6a, 0xd5, 0x89, 0x0b, + 0xff, 0x06, 0x56, 0x5c, 0x0f, 0x57, 0x69, 0x9d, 0x3e, 0xc5, 0x1e, 0xb5, 0xad, 0x1d, 0x6a, 0x99, + 0xf6, 0xe3, 0x38, 0x7a, 0xbe, 0xdb, 0x29, 0xac, 0x54, 0x06, 0x52, 0xa1, 0x21, 0x08, 0xf0, 0x2e, + 0x98, 0x73, 0x49, 0x9d, 0x18, 0x9e, 0xb0, 0x57, 0xfa, 0xe5, 0x52, 0xb7, 0x53, 0x98, 0xab, 0x44, + 0xf6, 0xdf, 0x76, 0x0a, 0xa7, 0x62, 0x8e, 0x11, 0x87, 0x28, 0xc6, 0x0c, 0xff, 0x04, 0x66, 0x9a, + 0xec, 0x17, 0x25, 0x6e, 0x2e, 0xb5, 0x9a, 0x1e, 0x11, 0x21, 0x49, 0x5f, 0xeb, 0x4b, 0xd2, 0x4b, + 0x33, 0x65, 0x09, 0x82, 0x02, 0x38, 0xf5, 0x45, 0x0a, 0x9c, 0xbb, 0x63, 0x3b, 0xf4, 0x29, 0x4b, + 0xfe, 0x7a, 0xd9, 0x36, 0xd7, 0x24, 0x18, 0x71, 0xe0, 0x23, 0x30, 0xc3, 0x9a, 0x8c, 0x89, 0x3d, + 0x2c, 0x03, 0xf3, 0x97, 0x11, 0xb1, 0x41, 0xaf, 0xd0, 0x9a, 0xfb, 0x35, 0xb6, 0xe1, 0x6a, 0x8c, + 0x5a, 0x6b, 0x5f, 0xd3, 0x44, 0xbd, 0xd8, 0x22, 0x1e, 0x0e, 0x53, 0x3a, 0xdc, 0x43, 0x01, 0x2a, + 0xfc, 0x23, 0xc8, 0xb8, 0x4d, 0x62, 0xc8, 0x00, 0xbd, 0x31, 0xcc, 0xa8, 0xfe, 0x3a, 0x56, 0x9a, + 0xc4, 0x08, 0xcb, 0x0b, 0x5b, 0x21, 0x8e, 0x08, 0x1f, 0x81, 0x29, 0x97, 0x07, 0x32, 0xbf, 0xcb, + 0x6c, 0xe9, 0xe6, 0x3b, 0x60, 0x8b, 0x44, 0x08, 0xf2, 0x4b, 0xac, 0x91, 0xc4, 0x55, 0xbf, 0x54, + 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x70, 0x9b, 0xda, 0x0e, 0xbc, 0x07, 0xa6, 0xf9, 0xce, 0x83, + 0xa6, 0x74, 0xe0, 0x95, 0x43, 0xdd, 0x1b, 0x0f, 0x51, 0x3d, 0xcb, 0xb2, 0xaf, 0x22, 0xd8, 0x91, + 0x8f, 0x03, 0x77, 0xc0, 0x2c, 0xff, 0x79, 0xdb, 0x7e, 0x6c, 0x49, 0xbf, 0x8d, 0x03, 0x3a, 0xcf, + 0x8a, 0x7e, 0xc5, 0x07, 0x40, 0x21, 0x96, 0xfa, 0xaf, 0x34, 0x58, 0x1d, 0x60, 0xcf, 0xba, 0x6d, + 0x99, 0x94, 0xc5, 0x38, 0xbc, 0x13, 0x4b, 0xf3, 0xeb, 0x89, 0x34, 0xbf, 0x38, 0x8a, 0x3f, 0x92, + 0xf6, 0x9b, 0xc1, 0x05, 0xa5, 0x62, 0x58, 0xd2, 0xcd, 0x6f, 0x3b, 0x85, 0x3e, 0x83, 0x95, 0x16, + 0x20, 0xc5, 0x2f, 0x03, 0xb6, 0x01, 0xac, 0x63, 0xd7, 0xbb, 0xef, 0x60, 0xcb, 0x15, 0x92, 0x68, + 0x83, 0xc8, 0xab, 0xbf, 0x72, 0xb8, 0xa0, 0x65, 0x1c, 0xfa, 0x8a, 0xd4, 0x02, 0x6e, 0xf6, 0xa0, + 0xa1, 0x3e, 0x12, 0xe0, 0x4f, 0xc1, 0x94, 0x43, 0xb0, 0x6b, 0x5b, 0xb9, 0x0c, 0xb7, 0x22, 0x08, + 0x16, 0xc4, 0x77, 0x91, 0x3c, 0x85, 0x3f, 0x03, 0xd3, 0x0d, 0xe2, 0xba, 0xb8, 0x46, 0x72, 0x93, + 0x9c, 0x30, 0x28, 0xaf, 0x5b, 0x62, 0x1b, 0xf9, 0xe7, 0xea, 0x57, 0x0a, 0x38, 0x3f, 0xc0, 0x8f, + 0x9b, 0xd4, 0xf5, 0xe0, 0x5f, 0x7a, 0xb2, 0x52, 0x3b, 0x9c, 0x81, 0x8c, 0x9b, 0xe7, 0x64, 0x50, + 0x0f, 0xfc, 0x9d, 0x48, 0x46, 0xee, 0x80, 0x49, 0xea, 0x91, 0x86, 0x5f, 0x67, 0x4a, 0xe3, 0xa7, + 0x4d, 0x58, 0xc1, 0x37, 0x18, 0x10, 0x12, 0x78, 0xea, 0x8b, 0xf4, 0x40, 0xb3, 0x58, 0xda, 0xc2, + 0x36, 0x58, 0xe0, 0x2b, 0xd9, 0x33, 0xc9, 0xae, 0x34, 0x6e, 0x58, 0x51, 0x18, 0x32, 0xa3, 0xe8, + 0x67, 0xa5, 0x16, 0x0b, 0x95, 0x18, 0x2a, 0x4a, 0x48, 0x81, 0xd7, 0x40, 0xb6, 0x41, 0x2d, 0x44, + 0x9a, 0x75, 0x6a, 0x60, 0x57, 0x36, 0xa1, 0xc5, 0x6e, 0xa7, 0x90, 0xdd, 0x0a, 0xb7, 0x51, 0x94, + 0x06, 0xfe, 0x1a, 0x64, 0x1b, 0xf8, 0x49, 0xc0, 0x22, 0x9a, 0xc5, 0x29, 0x29, 0x2f, 0xbb, 0x15, + 0x1e, 0xa1, 0x28, 0x1d, 0x2c, 0xb3, 0x18, 0x60, 0x6d, 0xd6, 0xcd, 0x65, 0xb8, 0x73, 0x7f, 0x32, + 0xb2, 0x21, 0xf3, 0xf2, 
0x16, 0x09, 0x15, 0xce, 0x8d, 0x7c, 0x18, 0x68, 0x82, 0x99, 0xaa, 0x2c, + 0x35, 0x3c, 0xac, 0xb2, 0xa5, 0xdf, 0xbc, 0xc3, 0x7d, 0x49, 0x04, 0x7d, 0x8e, 0x85, 0x84, 0xbf, + 0x42, 0x01, 0xb2, 0xfa, 0x3c, 0x03, 0x2e, 0x0c, 0x2d, 0x91, 0xf0, 0x77, 0x00, 0xda, 0x55, 0x97, + 0x38, 0x6d, 0x62, 0xfe, 0x5e, 0x3c, 0x12, 0xd8, 0x4c, 0xc7, 0xee, 0x2f, 0xad, 0x9f, 0x65, 0xd9, + 0xb4, 0xdd, 0x73, 0x8a, 0xfa, 0x70, 0x40, 0x03, 0xcc, 0xb3, 0x1c, 0x13, 0x37, 0x46, 0xe5, 0xf8, + 0x38, 0x5e, 0x02, 0x2f, 0xb3, 0x69, 0x60, 0x33, 0x0a, 0x82, 0xe2, 0x98, 0x70, 0x0d, 0x2c, 0xca, + 0x49, 0x26, 0x71, 0x83, 0xe7, 0xa4, 0x9f, 0x17, 0xd7, 0xe3, 0xc7, 0x28, 0x49, 0xcf, 0x20, 0x4c, + 0xe2, 0x52, 0x87, 0x98, 0x01, 0x44, 0x26, 0x0e, 0x71, 0x3b, 0x7e, 0x8c, 0x92, 0xf4, 0xb0, 0x06, + 0x16, 0x24, 0xaa, 0xbc, 0xd5, 0xdc, 0x24, 0x8f, 0x89, 0xd1, 0x43, 0xa6, 0x6c, 0x4b, 0x41, 0x7c, + 0xaf, 0xc7, 0x60, 0x50, 0x02, 0x16, 0xda, 0x00, 0x18, 0x7e, 0xd1, 0x74, 0x73, 0x53, 0x5c, 0xc8, + 0xad, 0xf1, 0xa3, 0x24, 0x28, 0xbc, 0x61, 0x47, 0x0f, 0xb6, 0x5c, 0x14, 0x11, 0xa1, 0xfe, 0x57, + 0x01, 0x4b, 0xc9, 0x21, 0x35, 0x78, 0x0f, 0x28, 0x03, 0xdf, 0x03, 0x7f, 0x05, 0x33, 0x62, 0xe6, + 0xb1, 0x1d, 0x79, 0xed, 0xbf, 0x3a, 0x64, 0x59, 0xc3, 0x55, 0x52, 0xaf, 0x48, 0x56, 0x11, 0xc4, + 0xfe, 0x0a, 0x05, 0x90, 0xea, 0xb3, 0x0c, 0x00, 0x61, 0x4e, 0xc1, 0xeb, 0xb1, 0x3e, 0xb6, 0x9a, + 0xe8, 0x63, 0x4b, 0xd1, 0xc7, 0x45, 0xa4, 0x67, 0xdd, 0x03, 0x53, 0x36, 0x2f, 0x33, 0x52, 0xc3, + 0xab, 0x43, 0xfc, 0x18, 0xcc, 0x3b, 0x01, 0x90, 0x0e, 0x58, 0x63, 0x90, 0x75, 0x4a, 0x02, 0xc1, + 0x0d, 0x90, 0x69, 0xda, 0xa6, 0x3f, 0xa5, 0x0c, 0x1b, 0xeb, 0xca, 0xb6, 0xe9, 0xc6, 0xe0, 0x66, + 0x98, 0xc6, 0x6c, 0x17, 0x71, 0x08, 0x36, 0x25, 0xfa, 0x9f, 0x12, 0x78, 0x38, 0x66, 0x4b, 0xc5, + 0x21, 0x70, 0xfd, 0x1e, 0xec, 0xc2, 0x7b, 0xfe, 0x09, 0x0a, 0xe0, 0xe0, 0x3f, 0xc0, 0xb2, 0x91, + 0x7c, 0x00, 0xe7, 0xa6, 0x47, 0x0e, 0x56, 0x43, 0xbf, 0x0e, 0xe8, 0x67, 0xba, 0x9d, 0xc2, 0x72, + 0x0f, 0x09, 0xea, 0x95, 0xc4, 0x2c, 0x23, 0xf2, 0xdd, 0x24, 0xeb, 0xdc, 0x30, 0xcb, 0xfa, 0xbd, + 0x10, 0x85, 0x65, 0xfe, 0x09, 0x0a, 0xe0, 0xd4, 0xff, 0x65, 0xc0, 0x5c, 0xec, 0x2d, 0x76, 0xcc, + 0x91, 0x21, 0x92, 0xf9, 0xc8, 0x22, 0x43, 0xc0, 0x1d, 0x69, 0x64, 0x08, 0xc8, 0x63, 0x8a, 0x0c, + 0x21, 0xec, 0x98, 0x22, 0x23, 0x62, 0x59, 0x9f, 0xc8, 0xf8, 0x3c, 0xe5, 0x47, 0x86, 0x18, 0x16, + 0x0e, 0x17, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xe7, 0xed, 0x88, 0x59, 0x4d, 0xf3, 0xdd, + 0xaa, 0xdd, 0x6b, 0x61, 0xcb, 0xa3, 0xde, 0x81, 0x3e, 0xdb, 0xf3, 0x14, 0x36, 0xc1, 0x1c, 0x6e, + 0x13, 0x07, 0xd7, 0x08, 0xdf, 0x96, 0xf1, 0x31, 0x2e, 0xee, 0x12, 0x7b, 0x89, 0xae, 0x45, 0x70, + 0x50, 0x0c, 0x95, 0xb5, 0x74, 0xb9, 0x7e, 0xe0, 0x05, 0x4f, 0x5c, 0xd9, 0xe5, 0x78, 0x4b, 0x5f, + 0xeb, 0x39, 0x45, 0x7d, 0x38, 0xd4, 0xff, 0xa4, 0xc0, 0x72, 0xcf, 0xc7, 0x85, 0xd0, 0x29, 0xca, + 0x7b, 0x72, 0x4a, 0xea, 0x18, 0x9d, 0x92, 0x1e, 0xdb, 0x29, 0x1f, 0xa5, 0x00, 0xec, 0xed, 0x0f, + 0xf0, 0x80, 0x8f, 0x15, 0x86, 0x43, 0xab, 0xc4, 0x14, 0xc7, 0x3f, 0x70, 0x06, 0x8e, 0x8e, 0x23, + 0x51, 0x58, 0x94, 0x94, 0x73, 0xf4, 0x1f, 0x59, 0xc3, 0x4f, 0x5a, 0xe9, 0x23, 0xfb, 0xa4, 0xa5, + 0x7e, 0x9a, 0xf4, 0xdb, 0x09, 0xfc, 0x7c, 0xd6, 0xef, 0x96, 0xd3, 0xc7, 0x73, 0xcb, 0xea, 0xc7, + 0x0a, 0x58, 0x4a, 0x8e, 0x11, 0x27, 0xe4, 0xdb, 0xe9, 0x67, 0x71, 0xd5, 0x4f, 0xe2, 0x77, 0xd3, + 0xe7, 0x0a, 0x38, 0x7d, 0x72, 0xfe, 0x26, 0x51, 0x3f, 0xec, 0x55, 0xf7, 0x04, 0xfc, 0xd9, 0xa1, + 0x5f, 0x7e, 0xf9, 0x26, 0x3f, 0xf1, 0xea, 0x4d, 0x7e, 0xe2, 0xf5, 0x9b, 0xfc, 0xc4, 0x3f, 0xbb, + 0x79, 0xe5, 0x65, 0x37, 0xaf, 0xbc, 0xea, 0xe6, 
0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0x75, 0x37, 0xaf, + 0xfc, 0xfb, 0x9b, 0xfc, 0xc4, 0x9f, 0x53, 0xed, 0xd2, 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x72, + 0xba, 0x02, 0x95, 0x47, 0x1c, 0x00, 0x00, +} + +func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StabilizationWindowSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) + i-- + dAtA[i] = 0x18 + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.SelectPolicy != nil { + i -= len(*m.SelectPolicy) + copy(dAtA[i:], *m.SelectPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScaleDown != nil { + { + size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ScaleUp != nil { + { + size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Behavior != nil { + { + size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 + if m.MinReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.CurrentMetrics) > 0 { + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ContainerResource != nil { + { + size, err := m.ContainerResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if 
m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := 
m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HPAScalingPolicy) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + return n +} + +func (m *HPAScalingRules) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SelectPolicy != nil { + l = len(*m.SelectPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StabilizationWindowSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerBehavior) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScaleUp != nil { + l = m.ScaleUp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDown != nil { + l = m.ScaleDown.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Behavior != nil { + l = m.Behavior.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerResource != nil { + l = m.ContainerResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HPAScalingPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingRules) String() string { + if this == nil { + return "nil" + } + repeatedStringForPolicies := "[]HPAScalingPolicy{" + for _, f := range this.Policies { + repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForPolicies += "}" + s := strings.Join([]string{`&HPAScalingRules{`, + `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, + `Policies:` + repeatedStringForPolicies + `,`, + `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 
1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerBehavior) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`, + `ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `Behavior:` + strings.Replace(this.Behavior.String(), "HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + 
repeatedStringForConditions += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *MetricIdentifier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricIdentifier{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricSource", "ContainerResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `ContainerResource:` + strings.Replace(this.ContainerResource.String(), "ContainerResourceMetricStatus", "ContainerResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricTarget{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *MetricValueStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricValueStatus{`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ObjectMetricSource{`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricSource: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeriodSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScalingPolicySelect(dAtA[iNdEx:postIndex]) + m.SelectPolicy = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, HPAScalingPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StabilizationWindowSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleUp == nil { + m.ScaleUp = &HPAScalingRules{} + } + if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDown == nil { + m.ScaleDown = &HPAScalingRules{} + } + if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Behavior == nil { + m.Behavior = &HorizontalPodAutoscalerBehavior{} + } + if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricIdentifier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricSource{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ContainerResource == nil { + m.ContainerResource = &ContainerResourceMetricStatus{} + } + if err := m.ContainerResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricTargetType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v 
int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricValueStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricValueStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
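Every field in these generated Unmarshal methods is decoded with the same hand-inlined base-128 varint loop seen above: seven payload bits per byte, least-significant group first, with the high bit of each byte marking continuation. A minimal standalone sketch of that loop, for readers tracing the decoder (illustrative only, not part of the vendored file):

package varint

import (
	"errors"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// decodeVarint reads one protobuf varint from data starting at index and returns the
// decoded value plus the index of the next undecoded byte, mirroring the checks the
// generated code performs (64-bit overflow guard and truncated-input guard).
func decodeVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if index >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break // high bit clear: this was the last byte of the varint
		}
	}
	return value, index, nil
}

Length-delimited fields (wire type 2) then reuse the decoded value as a byte count and recurse into the nested message's Unmarshal, which is why every case above checks postIndex against both negative overflow and the remaining buffer length before slicing.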
+func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto new file mode 100644 index 000000000..e47d94a47 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -0,0 +1,504 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.autoscaling.v2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2"; + +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ContainerResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ContainerResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // Container is the name of the container in the pods of the scaling target + optional string container = 3; +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). 
+message ExternalMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +message ExternalMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +message HPAScalingPolicy { + // Type is used to specify the scaling policy. + optional string type = 1; + + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + optional int32 value = 2; + + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + optional int32 periodSeconds = 3; +} + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +message HPAScalingRules { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + optional int32 stabilizationWindowSeconds = 3; + + // selectPolicy is used to specify which policy should be used. + // If not set, the default value Max is used. + // +optional + optional string selectPolicy = 1; + + // policies is a list of potential scaling polices which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +listType=atomic + // +optional + repeated HPAScalingPolicy policies = 2; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. 
+ // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +message HorizontalPodAutoscalerBehavior { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. + // +optional + optional HPAScalingRules scaleUp = 1; + + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + optional HPAScalingRules scaleDown = 2; +} + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. 
+ // If not set, the default metric will be set to 80% average CPU utilization. + // +listType=atomic + // +optional + repeated MetricSpec metrics = 4; + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + optional HorizontalPodAutoscalerBehavior behavior = 5; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + // +optional + optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +listType=atomic + // +optional + repeated MetricStatus currentMetrics = 5; + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + repeated HorizontalPodAutoscalerCondition conditions = 6; +} + +// MetricIdentifier defines the name and optionally selector for a metric +message MetricIdentifier { + // name is the name of the given metric + optional string name = 1; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; + + // containerResource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + optional ContainerResourceMetricSource containerResource = 7; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; + + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ContainerResourceMetricStatus containerResource = 7; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). 
+ // +optional + optional ExternalMetricStatus external = 5; +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +message MetricTarget { + // type represents whether the metric type is Utilization, Value, or AverageValue + optional string type = 1; + + // value is the target value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // +optional + optional int32 averageUtilization = 4; +} + +// MetricValueStatus holds the current value for a metric +message MetricValueStatus { + // value is the current value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 averageUtilization = 3; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // describedObject specifies the descriptions of a object,such as kind,name apiVersion + optional CrossVersionObjectReference describedObject = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion + optional CrossVersionObjectReference describedObject = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+message PodsMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + diff --git a/vendor/k8s.io/api/autoscaling/v2/register.go b/vendor/k8s.io/api/autoscaling/v2/register.go new file mode 100644 index 000000000..20f9ea5bb --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go new file mode 100644 index 000000000..9931f6146 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -0,0 +1,545 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v2 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. 
+ MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // If not set, the default metric will be set to 80% average CPU utilization. + // +listType=atomic + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each mapping to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in + // each pod of the current scale target (e.g. CPU or memory). 
Such metrics are + // built in to Kubernetes, and have special scaling options on top of those + // available to normal per-pod metrics using the "pods" source. + // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. + // +optional + ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +type HorizontalPodAutoscalerBehavior struct { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. + // +optional + ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"` +} + +// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction +type ScalingPolicySelect string + +const ( + // MaxChangePolicySelect selects the policy with the highest possible change. + MaxChangePolicySelect ScalingPolicySelect = "Max" + // MinChangePolicySelect selects the policy with the lowest possible change. + MinChangePolicySelect ScalingPolicySelect = "Min" + // DisabledPolicySelect disables the scaling in this direction. + DisabledPolicySelect ScalingPolicySelect = "Disabled" +) + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +type HPAScalingRules struct { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. + // If not set, the default value Max is used. 
+ // +optional + SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +listType=atomic + // +optional + Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"` +} + +// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. +type HPAScalingPolicyType string + +const ( + // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. + PodsScalingPolicy HPAScalingPolicyType = "Pods" + // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to + // the current number of pods. + PercentScalingPolicy HPAScalingPolicyType = "Percent" +) + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +type HPAScalingPolicy struct { + // Type is used to specify the scaling policy. + Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +const ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ContainerResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing a single container in each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ContainerResourceMetricSourceType MetricSourceType = "ContainerResource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). 
+ ExternalMetricSourceType MetricSourceType = "External" +) + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // describedObject specifies the descriptions of a object,such as kind,name apiVersion + DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// ContainerResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ContainerResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). 
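// Editorial sketch (not part of the vendored file): the metric source comments in
// this file describe a ratio-based calculation — the desired replica count is the
// current replica count multiplied by the ratio of the current metric value to the
// target value, rounded up. A minimal, hypothetical illustration in integer
// milli-units follows; the real controller additionally applies a tolerance,
// stabilization windows, and the behavior policies defined above.
func illustrativeDesiredReplicas(currentReplicas int32, currentMilliValue, targetMilliValue int64) int32 {
	if currentReplicas == 0 || targetMilliValue <= 0 {
		return currentReplicas // nothing to compute; real validation rejects non-positive targets
	}
	// ceil(currentReplicas * current / target): e.g. 4 pods averaging 900m CPU against a
	// 600m target yields ceil(4 * 900 / 600) = 6 desired replicas.
	scaled := int64(currentReplicas) * currentMilliValue
	return int32((scaled + targetMilliValue - 1) / targetMilliValue)
}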
+type ExternalMetricSource struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// MetricIdentifier defines the name and optionally selector for a metric +type MetricIdentifier struct { + // name is the name of the given metric + Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,name=selector"` +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +type MetricTarget struct { + // type represents whether the metric type is Utilization, Value, or AverageValue + Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). + // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"` +} + +// MetricTargetType specifies the type of metric being targeted, and should be either +// "Value", "AverageValue", or "Utilization" +type MetricTargetType string + +const ( + // UtilizationMetricType declares a MetricTarget is an AverageUtilization value + UtilizationMetricType MetricTargetType = "Utilization" + // ValueMetricType declares a MetricTarget is a raw value + ValueMetricType MetricTargetType = "Value" + // AverageValueMetricType declares a MetricTarget is an + AverageValueMetricType MetricTargetType = "AverageValue" +) + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + // +optional + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. 
+ DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +listType=atomic + // +optional + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []HorizontalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" listType:"map" protobuf:"bytes,6,rep,name=conditions"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +const ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will be one of "ContainerResource", "External", + // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. + // Note: "ContainerResource" type is available on when the feature-gate + // HPAContainerMetrics is enabled + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. 
+ // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing a single container in each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion + DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// ContainerResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing a single container in each pod in the +// current scale target (e.g. 
CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ContainerResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // Container is the name of the container in the pods of the scaling target + Container string `json:"container" protobuf:"bytes,3,opt,name=container"` +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// MetricValueStatus holds the current value for a metric +type MetricValueStatus struct { + // value is the current value of the metric (as a quantity). + // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go new file mode 100644 index 000000000..05355a552 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -0,0 +1,299 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourceMetricSource = map[string]string{ + "": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", + "container": "container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricSource +} + +var map_ContainerResourceMetricStatus = map[string]string{ + "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", + "container": "Container is the name of the container in the pods of the scaling target", +} + +func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ContainerResourceMetricStatus +} + +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "metric": "metric identifies the target metric by name and selector", + "target": "target specifies the target value for the given metric", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the 
given metric", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + +var map_HPAScalingPolicy = map[string]string{ + "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", + "type": "Type is used to specify the scaling policy.", + "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", +} + +func (HPAScalingPolicy) SwaggerDoc() map[string]string { + return map_HPAScalingPolicy +} + +var map_HPAScalingRules = map[string]string{ + "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", + "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", + "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", +} + +func (HPAScalingRules) SwaggerDoc() map[string]string { + return map_HPAScalingRules +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerBehavior = map[string]string{ + "": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", + "scaleUp": "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.", + "scaleDown": "scaleDown is scaling policy for scaling Down. 
If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).", +} + +func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerBehavior +} + +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", + "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", + "behavior": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). 
If not set, the default HPAScalingRules for scale up and scale down are used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricIdentifier = map[string]string{ + "": "MetricIdentifier defines the name and optionally selector for a metric", + "name": "name is the name of the given metric", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", +} + +func (MetricIdentifier) SwaggerDoc() map[string]string { + return map_MetricIdentifier +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_MetricTarget = map[string]string{ + "": "MetricTarget defines the target value, average value, or average utilization of a specific metric", + "type": "type represents whether the metric type is Utilization, Value, or AverageValue", + "value": "value is the target value of the metric (as a quantity).", + "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", + "averageUtilization": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type", +} + +func (MetricTarget) SwaggerDoc() map[string]string { + return map_MetricTarget +} + +var map_MetricValueStatus = map[string]string{ + "": "MetricValueStatus holds the current value for a metric", + "value": "value is the current value of the metric (as a quantity).", + "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", + "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", +} + +func (MetricValueStatus) SwaggerDoc() map[string]string { + return map_MetricValueStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "describedObject": "describedObject specifies the descriptions of a object,such as kind,name apiVersion", + "target": "target specifies the target value for the given metric", + "metric": "metric identifies the target metric by name and selector", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the given metric", + "describedObject": "DescribedObject specifies the descriptions of a object,such as kind,name apiVersion", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metric": "metric identifies the target metric by name and selector", + "target": "target specifies the target value for the given metric", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the given metric", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go new file mode 100644 index 000000000..125708d6f --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go @@ -0,0 +1,610 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource. +func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource { + if in == nil { + return nil + } + out := new(ContainerResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus. +func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus { + if in == nil { + return nil + } + out := new(ContainerResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy. +func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy { + if in == nil { + return nil + } + out := new(HPAScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { + *out = *in + if in.StabilizationWindowSeconds != nil { + in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds + *out = new(int32) + **out = **in + } + if in.SelectPolicy != nil { + in, out := &in.SelectPolicy, &out.SelectPolicy + *out = new(ScalingPolicySelect) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]HPAScalingPolicy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules. +func (in *HPAScalingRules) DeepCopy() *HPAScalingRules { + if in == nil { + return nil + } + out := new(HPAScalingRules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. 
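// Editorial usage sketch (not part of the generated file): DeepCopy exists so that
// callers can mutate a copy of an API object without aliasing the original's pointer
// and slice fields (MinReplicas, Metrics, Behavior, and so on). A minimal,
// hypothetical example:
func illustrateHPADeepCopy(hpa *HorizontalPodAutoscaler) *HorizontalPodAutoscaler {
	clone := hpa.DeepCopy()
	clone.Spec.MaxReplicas = 20 // safe: the original object is untouched
	if clone.Spec.MinReplicas != nil {
		*clone.Spec.MinReplicas = 2 // safe: the int32 pointer was duplicated, not shared
	}
	return clone
}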
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) { + *out = *in + if in.ScaleUp != nil { + in, out := &in.ScaleUp, &out.ScaleUp + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + if in.ScaleDown != nil { + in, out := &in.ScaleDown, &out.ScaleDown + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior. +func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerBehavior) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
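// Editorial note (not part of the generated file): DeepCopyInto writes into a
// caller-supplied destination instead of allocating a new object, and is the
// variant the other generated helpers build on. A minimal, hypothetical example;
// the receiver must be non-nil:
func illustrateSpecDeepCopyInto(src *HorizontalPodAutoscalerSpec) HorizontalPodAutoscalerSpec {
	var dst HorizontalPodAutoscalerSpec
	src.DeepCopyInto(&dst) // dst now holds independent copies of MinReplicas, Metrics, and Behavior
	return dst
}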
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(HorizontalPodAutoscalerBehavior) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier. +func (in *MetricIdentifier) DeepCopy() *MetricIdentifier { + if in == nil { + return nil + } + out := new(MetricIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.ContainerResource != nil { + in, out := &in.ContainerResource, &out.ContainerResource + *out = new(ContainerResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTarget) DeepCopyInto(out *MetricTarget) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget. +func (in *MetricTarget) DeepCopy() *MetricTarget { + if in == nil { + return nil + } + out := new(MetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus. +func (in *MetricValueStatus) DeepCopy() *MetricValueStatus { + if in == nil { + return nil + } + out := new(MetricValueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.DescribedObject = in.DescribedObject + in.Target.DeepCopyInto(&out.Target) + in.Metric.DeepCopyInto(&out.Metric) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + out.DescribedObject = in.DescribedObject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. 
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 5ad55fa72..27f9ab45c 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -248,6 +248,7 @@ message HorizontalPodAutoscalerStatus { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional repeated HorizontalPodAutoscalerCondition conditions = 6; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 05023d9bc..c49c5e0c5 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -264,6 +264,7 @@ type HorizontalPodAutoscalerStatus struct { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index f10c7884d..610e81f81 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go index f6baef699..67d38d051 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 77a6cb379..7dee14470 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -272,6 +272,7 @@ message HorizontalPodAutoscalerStatus { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. 
+ // +optional repeated HorizontalPodAutoscalerCondition conditions = 6; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 1be548d0a..7619f1e00 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -373,6 +373,7 @@ type HorizontalPodAutoscalerStatus struct { // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. + // +optional Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index 81642822a..49838843c 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go index c77892c99..b8e49bc51 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index b8a102831..a33edf5b7 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -375,95 +375,96 @@ func init() { } var fileDescriptor_3b52da57c93de713 = []byte{ - // 1395 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x76, 0x6c, 0x8f, 0x93, 0xd4, 0x9d, 0x7e, 0xdb, 0xfa, 0x6b, 0x2a, 0x6f, 0x6a, + // 1413 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x41, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0x26, 0x71, 0x62, 0x8f, 0x93, 0xd4, 0x9d, 0xfe, 0xdb, 0xfa, 0x6f, 0x2a, 0x6f, 0x6a, 0x0a, 0x0a, 0xa8, 0xac, 0x49, 0x88, 0x10, 0x42, 0x80, 0x94, 0x4d, 0x55, 0x68, 0x70, 0xd4, 0x30, - 0x76, 0x84, 0x04, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xec, 0xee, 0x58, 0x3b, 0x63, 0x8b, 0xdc, - 0x90, 0xf8, 0x07, 0xf8, 0x2b, 0x10, 0x27, 0x84, 0x04, 0x67, 0x8e, 0xa8, 0xc7, 0x1e, 0x7b, 0x5a, - 0xd1, 0xe5, 0x0f, 0x80, 0x73, 0xb8, 0xa0, 0x9d, 0x1d, 0xef, 0x0f, 0x7b, 0x37, 0xa4, 0x3d, 0x54, - 0xdc, 0xbc, 0x6f, 0x3e, 0x9f, 0xcf, 0xbc, 0x99, 0xf7, 0xe6, 0xbd, 0x67, 0xf0, 0xde, 0xc9, 0x3b, - 0x54, 0x31, 0x49, 0xe7, 0x64, 0x32, 0xc0, 0xae, 0x83, 0x19, 0xa6, 0x9d, 0x29, 0x76, 0x0c, 0xe2, - 0x76, 0xc4, 0x82, 0x36, 0x36, 0x3b, 0x03, 0x8d, 0xe9, 0xc7, 0x9d, 0xe9, 0x56, 0x67, 0x84, 0x1d, - 0xec, 0x6a, 0x0c, 0x1b, 0xca, 0xd8, 0x25, 0x8c, 0xc0, 0x2b, 0x21, 0x48, 0xd1, 0xc6, 0xa6, 0xc2, - 0x41, 0xca, 0x74, 0xab, 0xf9, 0xc6, 0xc8, 0x64, 0xc7, 0x93, 0x81, 0xa2, 0x13, 0xbb, 0x33, 0x22, - 0x23, 0xd2, 0xe1, 0xd8, 0xc1, 0x64, 0xc8, 0xbf, 0xf8, 0x07, 0xff, 0x15, 0x6a, 0x34, 0xdb, 0x89, - 0x8d, 0x74, 0xe2, 0xe2, 0x8c, 0x7d, 0x9a, 0x3b, 0x31, 0xc6, 0xd6, 0xf4, 0x63, 0xd3, 0xc1, 0xee, - 0x69, 0x67, 0x7c, 0x32, 0x0a, 0x0c, 0xb4, 0x63, 0x63, 0xa6, 0x65, 0xb1, 0x3a, 0x79, 0x2c, 0x77, - 0xe2, 0x30, 0xd3, 0xc6, 0x0b, 0x84, 0xb7, 0xff, 0x8d, 0x40, 0xf5, 
0x63, 0x6c, 0x6b, 0xf3, 0xbc, - 0xf6, 0xdf, 0x12, 0x28, 0xef, 0xb9, 0xc4, 0xd9, 0x27, 0x03, 0xf8, 0x15, 0xa8, 0x04, 0xfe, 0x18, - 0x1a, 0xd3, 0x1a, 0xd2, 0x86, 0xb4, 0x59, 0xdb, 0x7e, 0x53, 0x89, 0x6f, 0x29, 0x92, 0x55, 0xc6, - 0x27, 0xa3, 0xc0, 0x40, 0x95, 0x00, 0xad, 0x4c, 0xb7, 0x94, 0xfb, 0x83, 0x87, 0x58, 0x67, 0x07, - 0x98, 0x69, 0x2a, 0x7c, 0xe4, 0xc9, 0x4b, 0xbe, 0x27, 0x83, 0xd8, 0x86, 0x22, 0x55, 0xa8, 0x82, - 0x22, 0x1d, 0x63, 0xbd, 0xb1, 0xcc, 0xd5, 0x37, 0x94, 0x8c, 0x18, 0x28, 0xc2, 0x9b, 0xde, 0x18, - 0xeb, 0xea, 0xaa, 0x50, 0x2b, 0x06, 0x5f, 0x88, 0x73, 0xe1, 0x3e, 0x58, 0xa1, 0x4c, 0x63, 0x13, - 0xda, 0x28, 0x70, 0x95, 0xf6, 0xb9, 0x2a, 0x1c, 0xa9, 0xae, 0x0b, 0x9d, 0x95, 0xf0, 0x1b, 0x09, - 0x85, 0xf6, 0x8f, 0x12, 0xa8, 0x09, 0x64, 0xd7, 0xa4, 0x0c, 0x3e, 0x58, 0xb8, 0x01, 0xe5, 0x62, - 0x37, 0x10, 0xb0, 0xf9, 0xf9, 0xeb, 0x62, 0xa7, 0xca, 0xcc, 0x92, 0x38, 0xfd, 0x2e, 0x28, 0x99, - 0x0c, 0xdb, 0xb4, 0xb1, 0xbc, 0x51, 0xd8, 0xac, 0x6d, 0xdf, 0x38, 0xcf, 0x71, 0x75, 0x4d, 0x08, - 0x95, 0xee, 0x05, 0x14, 0x14, 0x32, 0xdb, 0x3f, 0x14, 0x23, 0x87, 0x83, 0x2b, 0x81, 0xb7, 0x41, - 0x25, 0x08, 0xac, 0x31, 0xb1, 0x30, 0x77, 0xb8, 0x1a, 0x3b, 0xd0, 0x13, 0x76, 0x14, 0x21, 0xe0, - 0x11, 0xb8, 0x4e, 0x99, 0xe6, 0x32, 0xd3, 0x19, 0xdd, 0xc1, 0x9a, 0x61, 0x99, 0x0e, 0xee, 0x61, - 0x9d, 0x38, 0x06, 0xe5, 0x11, 0x29, 0xa8, 0x2f, 0xf9, 0x9e, 0x7c, 0xbd, 0x97, 0x0d, 0x41, 0x79, - 0x5c, 0xf8, 0x00, 0x5c, 0xd6, 0x89, 0xa3, 0x4f, 0x5c, 0x17, 0x3b, 0xfa, 0xe9, 0x21, 0xb1, 0x4c, - 0xfd, 0x94, 0x07, 0xa7, 0xaa, 0x2a, 0xc2, 0x9b, 0xcb, 0x7b, 0xf3, 0x80, 0xb3, 0x2c, 0x23, 0x5a, - 0x14, 0x82, 0xaf, 0x80, 0x32, 0x9d, 0xd0, 0x31, 0x76, 0x8c, 0x46, 0x71, 0x43, 0xda, 0xac, 0xa8, - 0x35, 0xdf, 0x93, 0xcb, 0xbd, 0xd0, 0x84, 0x66, 0x6b, 0xf0, 0x73, 0x50, 0x7b, 0x48, 0x06, 0x7d, - 0x6c, 0x8f, 0x2d, 0x8d, 0xe1, 0x46, 0x89, 0x47, 0xef, 0x56, 0xe6, 0x15, 0xef, 0xc7, 0x38, 0x9e, - 0x65, 0x57, 0x84, 0x93, 0xb5, 0xc4, 0x02, 0x4a, 0xaa, 0xc1, 0x2f, 0x41, 0x93, 0x4e, 0x74, 0x1d, - 0x53, 0x3a, 0x9c, 0x58, 0xfb, 0x64, 0x40, 0x3f, 0x32, 0x29, 0x23, 0xee, 0x69, 0xd7, 0xb4, 0x4d, - 0xd6, 0x58, 0xd9, 0x90, 0x36, 0x4b, 0x6a, 0xcb, 0xf7, 0xe4, 0x66, 0x2f, 0x17, 0x85, 0xce, 0x51, - 0x80, 0x08, 0x5c, 0x1b, 0x6a, 0xa6, 0x85, 0x8d, 0x05, 0xed, 0x32, 0xd7, 0x6e, 0xfa, 0x9e, 0x7c, - 0xed, 0x6e, 0x26, 0x02, 0xe5, 0x30, 0xdb, 0xbf, 0x2e, 0x83, 0xb5, 0xd4, 0x2b, 0x80, 0x1f, 0x83, - 0x15, 0x4d, 0x67, 0xe6, 0x34, 0x48, 0x95, 0x20, 0x01, 0x5f, 0x4e, 0xde, 0x4e, 0x50, 0xbf, 0xe2, - 0xb7, 0x8c, 0xf0, 0x10, 0x07, 0x41, 0xc0, 0xf1, 0xd3, 0xd9, 0xe5, 0x54, 0x24, 0x24, 0xa0, 0x05, - 0xea, 0x96, 0x46, 0xd9, 0x2c, 0xcb, 0xfa, 0xa6, 0x8d, 0x79, 0x7c, 0x6a, 0xdb, 0xaf, 0x5f, 0xec, - 0xc9, 0x04, 0x0c, 0xf5, 0x7f, 0xbe, 0x27, 0xd7, 0xbb, 0x73, 0x3a, 0x68, 0x41, 0x19, 0xba, 0x00, - 0x72, 0x5b, 0x74, 0x85, 0x7c, 0xbf, 0xd2, 0x33, 0xef, 0x77, 0xcd, 0xf7, 0x64, 0xd8, 0x5d, 0x50, - 0x42, 0x19, 0xea, 0xed, 0x3f, 0x25, 0x50, 0x78, 0x31, 0x65, 0xf1, 0x83, 0x54, 0x59, 0xbc, 0x91, - 0x97, 0xb4, 0xb9, 0x25, 0xf1, 0xee, 0x5c, 0x49, 0x6c, 0xe5, 0x2a, 0x9c, 0x5f, 0x0e, 0x7f, 0x2b, - 0x80, 0xd5, 0x7d, 0x32, 0xd8, 0x23, 0x8e, 0x61, 0x32, 0x93, 0x38, 0x70, 0x07, 0x14, 0xd9, 0xe9, - 0x78, 0x56, 0x5a, 0x36, 0x66, 0x5b, 0xf7, 0x4f, 0xc7, 0xf8, 0xcc, 0x93, 0xeb, 0x49, 0x6c, 0x60, - 0x43, 0x1c, 0x0d, 0xbb, 0x91, 0x3b, 0xcb, 0x9c, 0xb7, 0x93, 0xde, 0xee, 0xcc, 0x93, 0x33, 0x1a, - 0xa7, 0x12, 0x29, 0xa5, 0x9d, 0x82, 0x23, 0xb0, 0x16, 0x04, 0xe7, 0xd0, 0x25, 0x83, 0x30, 0xcb, - 0x0a, 0xcf, 0x1c, 0xf5, 0xab, 0xc2, 0x81, 0xb5, 0x6e, 0x52, 0x08, 0xa5, 0x75, 0xe1, 0x34, 
0xcc, - 0xb1, 0xbe, 0xab, 0x39, 0x34, 0x3c, 0xd2, 0xf3, 0xe5, 0x74, 0x53, 0xec, 0xc6, 0xf3, 0x2c, 0xad, - 0x86, 0x32, 0x76, 0x80, 0xaf, 0x82, 0x15, 0x17, 0x6b, 0x94, 0x38, 0x3c, 0x9f, 0xab, 0x71, 0x74, - 0x10, 0xb7, 0x22, 0xb1, 0x0a, 0x5f, 0x03, 0x65, 0x1b, 0x53, 0xaa, 0x8d, 0x30, 0xaf, 0x38, 0x55, - 0xf5, 0x92, 0x00, 0x96, 0x0f, 0x42, 0x33, 0x9a, 0xad, 0xb7, 0xbf, 0x97, 0x40, 0xf9, 0xc5, 0xf4, - 0xb4, 0xf7, 0xd3, 0x3d, 0xad, 0x91, 0x97, 0x79, 0x39, 0xfd, 0xec, 0xa7, 0x12, 0x77, 0x94, 0xf7, - 0xb2, 0x2d, 0x50, 0x1b, 0x6b, 0xae, 0x66, 0x59, 0xd8, 0x32, 0xa9, 0xcd, 0x7d, 0x2d, 0xa9, 0x97, - 0x82, 0xba, 0x7c, 0x18, 0x9b, 0x51, 0x12, 0x13, 0x50, 0x74, 0x62, 0x8f, 0x2d, 0x1c, 0x5c, 0x66, - 0x98, 0x6e, 0x82, 0xb2, 0x17, 0x9b, 0x51, 0x12, 0x03, 0xef, 0x83, 0xab, 0x61, 0x05, 0x9b, 0xef, - 0x80, 0x05, 0xde, 0x01, 0xff, 0xef, 0x7b, 0xf2, 0xd5, 0xdd, 0x2c, 0x00, 0xca, 0xe6, 0xc1, 0x1d, - 0xb0, 0x3a, 0xd0, 0xf4, 0x13, 0x32, 0x1c, 0x26, 0x2b, 0x76, 0xdd, 0xf7, 0xe4, 0x55, 0x35, 0x61, - 0x47, 0x29, 0x14, 0xfc, 0x02, 0x54, 0x28, 0xb6, 0xb0, 0xce, 0x88, 0x2b, 0x52, 0xec, 0xad, 0x0b, - 0x46, 0x45, 0x1b, 0x60, 0xab, 0x27, 0xa8, 0xea, 0x2a, 0xef, 0xf4, 0xe2, 0x0b, 0x45, 0x92, 0xf0, - 0x5d, 0xb0, 0x6e, 0x6b, 0xce, 0x44, 0x8b, 0x90, 0x3c, 0xb7, 0x2a, 0x2a, 0xf4, 0x3d, 0x79, 0xfd, - 0x20, 0xb5, 0x82, 0xe6, 0x90, 0xf0, 0x13, 0x50, 0x61, 0xb3, 0x36, 0xba, 0xc2, 0x5d, 0xcb, 0x6c, - 0x14, 0x87, 0xc4, 0x48, 0x75, 0xd1, 0x28, 0x4b, 0xa2, 0x16, 0x1a, 0xc9, 0x04, 0x83, 0x07, 0x63, - 0x96, 0xb8, 0xb1, 0xdd, 0x21, 0xc3, 0xee, 0x5d, 0xd3, 0x31, 0xe9, 0x31, 0x36, 0x1a, 0x15, 0x7e, - 0x5d, 0x7c, 0xf0, 0xe8, 0xf7, 0xbb, 0x59, 0x10, 0x94, 0xc7, 0x85, 0x5d, 0xb0, 0x1e, 0x87, 0xf6, - 0x80, 0x18, 0xb8, 0x51, 0xe5, 0x0f, 0xe3, 0x56, 0x70, 0xca, 0xbd, 0xd4, 0xca, 0xd9, 0x82, 0x05, - 0xcd, 0x71, 0x93, 0x83, 0x06, 0xc8, 0x1f, 0x34, 0xda, 0x7f, 0x15, 0x41, 0x35, 0xee, 0xa9, 0x47, - 0x00, 0xe8, 0xb3, 0xc2, 0x45, 0x45, 0x5f, 0xbd, 0x99, 0xf7, 0x08, 0xa2, 0x12, 0x17, 0xf7, 0x83, - 0xc8, 0x44, 0x51, 0x42, 0x08, 0x7e, 0x0a, 0xaa, 0x7c, 0xda, 0xe2, 0x25, 0x68, 0xf9, 0x99, 0x4b, - 0xd0, 0x9a, 0xef, 0xc9, 0xd5, 0xde, 0x4c, 0x00, 0xc5, 0x5a, 0x70, 0x98, 0xbc, 0xb2, 0xe7, 0x2c, - 0xa7, 0x30, 0x7d, 0xbd, 0x7c, 0x8b, 0x39, 0xd5, 0xa0, 0xa8, 0x89, 0x59, 0xa3, 0xc8, 0x03, 0x9c, - 0x37, 0x46, 0x74, 0x40, 0x95, 0xcf, 0x45, 0xd8, 0xc0, 0x06, 0xcf, 0xd1, 0x92, 0x7a, 0x59, 0x40, - 0xab, 0xbd, 0xd9, 0x02, 0x8a, 0x31, 0x81, 0x70, 0x38, 0xf0, 0x88, 0xb1, 0x2b, 0x12, 0x0e, 0xc7, - 0x23, 0x24, 0x56, 0xe1, 0x1d, 0x50, 0x17, 0x2e, 0x61, 0xe3, 0x9e, 0x63, 0xe0, 0xaf, 0x31, 0xe5, - 0x4f, 0xb3, 0xaa, 0x36, 0x04, 0xa3, 0xbe, 0x37, 0xb7, 0x8e, 0x16, 0x18, 0xf0, 0x5b, 0x09, 0x5c, - 0x9f, 0x38, 0x3a, 0x99, 0x38, 0x0c, 0x1b, 0x7d, 0xec, 0xda, 0xa6, 0x13, 0xfc, 0x79, 0x3a, 0x24, - 0x06, 0xe5, 0x99, 0x5b, 0xdb, 0xbe, 0x9d, 0x19, 0xec, 0xa3, 0x6c, 0x4e, 0x98, 0xe7, 0x39, 0x8b, - 0x28, 0x6f, 0xa7, 0xf6, 0xcf, 0x12, 0xb8, 0x34, 0x37, 0xb4, 0xfe, 0xf7, 0xa7, 0x92, 0xf6, 0x2f, - 0x12, 0xc8, 0x3b, 0x2a, 0x3c, 0x4c, 0x86, 0x3d, 0x78, 0x35, 0x55, 0x75, 0x3b, 0x15, 0xf2, 0x33, - 0x4f, 0xbe, 0x99, 0xf7, 0x97, 0x36, 0x18, 0x32, 0xa8, 0x72, 0x74, 0xef, 0x4e, 0x32, 0x2f, 0x3e, - 0x8c, 0xf2, 0x62, 0x99, 0xcb, 0x75, 0xe2, 0x9c, 0xb8, 0x98, 0x96, 0xa0, 0xab, 0x9b, 0x8f, 0x9e, - 0xb6, 0x96, 0x1e, 0x3f, 0x6d, 0x2d, 0x3d, 0x79, 0xda, 0x5a, 0xfa, 0xc6, 0x6f, 0x49, 0x8f, 0xfc, - 0x96, 0xf4, 0xd8, 0x6f, 0x49, 0x4f, 0xfc, 0x96, 0xf4, 0xbb, 0xdf, 0x92, 0xbe, 0xfb, 0xa3, 0xb5, - 0xf4, 0xd9, 0xf2, 0x74, 0xeb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xfc, 0x0b, 0x48, 0x81, - 0x10, 0x00, 0x00, 
+ 0x76, 0x84, 0x04, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xec, 0xee, 0x58, 0x3b, 0x63, 0x0b, 0xdf, + 0x90, 0xf8, 0x02, 0xf0, 0x25, 0x10, 0x27, 0x84, 0x04, 0x67, 0x8e, 0xa8, 0xc7, 0x1e, 0x7b, 0x5a, + 0xd1, 0xe5, 0x03, 0x70, 0x0f, 0x17, 0x34, 0xb3, 0xe3, 0xdd, 0xb5, 0xbd, 0x1b, 0xd2, 0x1e, 0x2a, + 0x6e, 0xd9, 0x37, 0xbf, 0xf7, 0x9b, 0xe7, 0xf7, 0x7e, 0xf3, 0xde, 0x0b, 0x78, 0xef, 0xf4, 0x1d, + 0xaa, 0xd9, 0xa4, 0x79, 0x3a, 0xec, 0x62, 0xdf, 0xc3, 0x0c, 0xd3, 0xe6, 0x08, 0x7b, 0x16, 0xf1, + 0x9b, 0xf2, 0xc0, 0x18, 0xd8, 0xcd, 0xae, 0xc1, 0xcc, 0x93, 0xe6, 0x68, 0xbb, 0xd9, 0xc7, 0x1e, + 0xf6, 0x0d, 0x86, 0x2d, 0x6d, 0xe0, 0x13, 0x46, 0xe0, 0x95, 0x08, 0xa4, 0x19, 0x03, 0x5b, 0x13, + 0x20, 0x6d, 0xb4, 0x5d, 0x7b, 0xa3, 0x6f, 0xb3, 0x93, 0x61, 0x57, 0x33, 0x89, 0xdb, 0xec, 0x93, + 0x3e, 0x69, 0x0a, 0x6c, 0x77, 0xd8, 0x13, 0x5f, 0xe2, 0x43, 0xfc, 0x15, 0x71, 0xd4, 0x1a, 0xa9, + 0x8b, 0x4c, 0xe2, 0xe3, 0x8c, 0x7b, 0x6a, 0xbb, 0x09, 0xc6, 0x35, 0xcc, 0x13, 0xdb, 0xc3, 0xfe, + 0xb8, 0x39, 0x38, 0xed, 0x73, 0x03, 0x6d, 0xba, 0x98, 0x19, 0x59, 0x5e, 0xcd, 0x3c, 0x2f, 0x7f, + 0xe8, 0x31, 0xdb, 0xc5, 0x73, 0x0e, 0x6f, 0xff, 0x9b, 0x03, 0x35, 0x4f, 0xb0, 0x6b, 0xcc, 0xfa, + 0x35, 0xfe, 0x56, 0xc0, 0xea, 0xbe, 0x4f, 0xbc, 0x03, 0xd2, 0x85, 0x5f, 0x81, 0x22, 0x8f, 0xc7, + 0x32, 0x98, 0x51, 0x55, 0x36, 0x95, 0xad, 0xf2, 0xce, 0x9b, 0x5a, 0x92, 0xa5, 0x98, 0x56, 0x1b, + 0x9c, 0xf6, 0xb9, 0x81, 0x6a, 0x1c, 0xad, 0x8d, 0xb6, 0xb5, 0xfb, 0xdd, 0x87, 0xd8, 0x64, 0x87, + 0x98, 0x19, 0x3a, 0x7c, 0x14, 0xa8, 0x0b, 0x61, 0xa0, 0x82, 0xc4, 0x86, 0x62, 0x56, 0xa8, 0x83, + 0x65, 0x3a, 0xc0, 0x66, 0x75, 0x51, 0xb0, 0x6f, 0x6a, 0x19, 0x35, 0xd0, 0x64, 0x34, 0xed, 0x01, + 0x36, 0xf5, 0x35, 0xc9, 0xb6, 0xcc, 0xbf, 0x90, 0xf0, 0x85, 0x07, 0x60, 0x85, 0x32, 0x83, 0x0d, + 0x69, 0x75, 0x49, 0xb0, 0x34, 0xce, 0x65, 0x11, 0x48, 0x7d, 0x43, 0xf2, 0xac, 0x44, 0xdf, 0x48, + 0x32, 0x34, 0x7e, 0x52, 0x40, 0x59, 0x22, 0x5b, 0x36, 0x65, 0xf0, 0xc1, 0x5c, 0x06, 0xb4, 0x8b, + 0x65, 0x80, 0x7b, 0x8b, 0xdf, 0x5f, 0x91, 0x37, 0x15, 0x27, 0x96, 0xd4, 0xaf, 0xdf, 0x03, 0x05, + 0x9b, 0x61, 0x97, 0x56, 0x17, 0x37, 0x97, 0xb6, 0xca, 0x3b, 0x37, 0xce, 0x0b, 0x5c, 0x5f, 0x97, + 0x44, 0x85, 0x7b, 0xdc, 0x05, 0x45, 0x9e, 0x8d, 0x1f, 0x97, 0xe3, 0x80, 0x79, 0x4a, 0xe0, 0x6d, + 0x50, 0xe4, 0x85, 0xb5, 0x86, 0x0e, 0x16, 0x01, 0x97, 0x92, 0x00, 0xda, 0xd2, 0x8e, 0x62, 0x04, + 0x3c, 0x06, 0xd7, 0x29, 0x33, 0x7c, 0x66, 0x7b, 0xfd, 0x3b, 0xd8, 0xb0, 0x1c, 0xdb, 0xc3, 0x6d, + 0x6c, 0x12, 0xcf, 0xa2, 0xa2, 0x22, 0x4b, 0xfa, 0x4b, 0x61, 0xa0, 0x5e, 0x6f, 0x67, 0x43, 0x50, + 0x9e, 0x2f, 0x7c, 0x00, 0x2e, 0x9b, 0xc4, 0x33, 0x87, 0xbe, 0x8f, 0x3d, 0x73, 0x7c, 0x44, 0x1c, + 0xdb, 0x1c, 0x8b, 0xe2, 0x94, 0x74, 0x4d, 0x46, 0x73, 0x79, 0x7f, 0x16, 0x70, 0x96, 0x65, 0x44, + 0xf3, 0x44, 0xf0, 0x15, 0xb0, 0x4a, 0x87, 0x74, 0x80, 0x3d, 0xab, 0xba, 0xbc, 0xa9, 0x6c, 0x15, + 0xf5, 0x72, 0x18, 0xa8, 0xab, 0xed, 0xc8, 0x84, 0x26, 0x67, 0xf0, 0x73, 0x50, 0x7e, 0x48, 0xba, + 0x1d, 0xec, 0x0e, 0x1c, 0x83, 0xe1, 0x6a, 0x41, 0x54, 0xef, 0x56, 0x66, 0x8a, 0x0f, 0x12, 0x9c, + 0x50, 0xd9, 0x15, 0x19, 0x64, 0x39, 0x75, 0x80, 0xd2, 0x6c, 0xf0, 0x4b, 0x50, 0xa3, 0x43, 0xd3, + 0xc4, 0x94, 0xf6, 0x86, 0xce, 0x01, 0xe9, 0xd2, 0x8f, 0x6c, 0xca, 0x88, 0x3f, 0x6e, 0xd9, 0xae, + 0xcd, 0xaa, 0x2b, 0x9b, 0xca, 0x56, 0x41, 0xaf, 0x87, 0x81, 0x5a, 0x6b, 0xe7, 0xa2, 0xd0, 0x39, + 0x0c, 0x10, 0x81, 0x6b, 0x3d, 0xc3, 0x76, 0xb0, 0x35, 0xc7, 0xbd, 0x2a, 0xb8, 0x6b, 0x61, 0xa0, + 0x5e, 0xbb, 0x9b, 0x89, 0x40, 0x39, 0x9e, 0x8d, 0xdf, 0x16, 0xc1, 0xfa, 0xd4, 0x2b, 0x80, 0x1f, + 0x83, 0x15, 0xc3, 0x64, 
0xf6, 0x88, 0x4b, 0x85, 0x0b, 0xf0, 0xe5, 0x74, 0x76, 0x78, 0xff, 0x4a, + 0xde, 0x32, 0xc2, 0x3d, 0xcc, 0x8b, 0x80, 0x93, 0xa7, 0xb3, 0x27, 0x5c, 0x91, 0xa4, 0x80, 0x0e, + 0xa8, 0x38, 0x06, 0x65, 0x13, 0x95, 0x75, 0x6c, 0x17, 0x8b, 0xfa, 0x94, 0x77, 0x5e, 0xbf, 0xd8, + 0x93, 0xe1, 0x1e, 0xfa, 0xff, 0xc2, 0x40, 0xad, 0xb4, 0x66, 0x78, 0xd0, 0x1c, 0x33, 0xf4, 0x01, + 0x14, 0xb6, 0x38, 0x85, 0xe2, 0xbe, 0xc2, 0x33, 0xdf, 0x77, 0x2d, 0x0c, 0x54, 0xd8, 0x9a, 0x63, + 0x42, 0x19, 0xec, 0x8d, 0xbf, 0x14, 0xb0, 0xf4, 0x62, 0xda, 0xe2, 0x07, 0x53, 0x6d, 0xf1, 0x46, + 0x9e, 0x68, 0x73, 0x5b, 0xe2, 0xdd, 0x99, 0x96, 0x58, 0xcf, 0x65, 0x38, 0xbf, 0x1d, 0xfe, 0xbe, + 0x04, 0xd6, 0x0e, 0x48, 0x77, 0x9f, 0x78, 0x96, 0xcd, 0x6c, 0xe2, 0xc1, 0x5d, 0xb0, 0xcc, 0xc6, + 0x83, 0x49, 0x6b, 0xd9, 0x9c, 0x5c, 0xdd, 0x19, 0x0f, 0xf0, 0x59, 0xa0, 0x56, 0xd2, 0x58, 0x6e, + 0x43, 0x02, 0x0d, 0x5b, 0x71, 0x38, 0x8b, 0xc2, 0x6f, 0x77, 0xfa, 0xba, 0xb3, 0x40, 0xcd, 0x18, + 0x9c, 0x5a, 0xcc, 0x34, 0x1d, 0x14, 0xec, 0x83, 0x75, 0x5e, 0x9c, 0x23, 0x9f, 0x74, 0x23, 0x95, + 0x2d, 0x3d, 0x73, 0xd5, 0xaf, 0xca, 0x00, 0xd6, 0x5b, 0x69, 0x22, 0x34, 0xcd, 0x0b, 0x47, 0x91, + 0xc6, 0x3a, 0xbe, 0xe1, 0xd1, 0xe8, 0x27, 0x3d, 0x9f, 0xa6, 0x6b, 0xf2, 0x36, 0xa1, 0xb3, 0x69, + 0x36, 0x94, 0x71, 0x03, 0x7c, 0x15, 0xac, 0xf8, 0xd8, 0xa0, 0xc4, 0x13, 0x7a, 0x2e, 0x25, 0xd5, + 0x41, 0xc2, 0x8a, 0xe4, 0x29, 0x7c, 0x0d, 0xac, 0xba, 0x98, 0x52, 0xa3, 0x8f, 0x45, 0xc7, 0x29, + 0xe9, 0x97, 0x24, 0x70, 0xf5, 0x30, 0x32, 0xa3, 0xc9, 0x79, 0xe3, 0x07, 0x05, 0xac, 0xbe, 0x98, + 0x99, 0xf6, 0xfe, 0xf4, 0x4c, 0xab, 0xe6, 0x29, 0x2f, 0x67, 0x9e, 0xfd, 0x5c, 0x10, 0x81, 0x8a, + 0x59, 0xb6, 0x0d, 0xca, 0x03, 0xc3, 0x37, 0x1c, 0x07, 0x3b, 0x36, 0x75, 0x45, 0xac, 0x05, 0xfd, + 0x12, 0xef, 0xcb, 0x47, 0x89, 0x19, 0xa5, 0x31, 0xdc, 0xc5, 0x24, 0xee, 0xc0, 0xc1, 0x3c, 0x99, + 0x91, 0xdc, 0xa4, 0xcb, 0x7e, 0x62, 0x46, 0x69, 0x0c, 0xbc, 0x0f, 0xae, 0x46, 0x1d, 0x6c, 0x76, + 0x02, 0x2e, 0x89, 0x09, 0xf8, 0xff, 0x30, 0x50, 0xaf, 0xee, 0x65, 0x01, 0x50, 0xb6, 0x1f, 0xdc, + 0x05, 0x6b, 0x5d, 0xc3, 0x3c, 0x25, 0xbd, 0x5e, 0xba, 0x63, 0x57, 0xc2, 0x40, 0x5d, 0xd3, 0x53, + 0x76, 0x34, 0x85, 0x82, 0x5f, 0x80, 0x22, 0xc5, 0x0e, 0x36, 0x19, 0xf1, 0xa5, 0xc4, 0xde, 0xba, + 0x60, 0x55, 0x8c, 0x2e, 0x76, 0xda, 0xd2, 0x55, 0x5f, 0x13, 0x93, 0x5e, 0x7e, 0xa1, 0x98, 0x12, + 0xbe, 0x0b, 0x36, 0x5c, 0xc3, 0x1b, 0x1a, 0x31, 0x52, 0x68, 0xab, 0xa8, 0xc3, 0x30, 0x50, 0x37, + 0x0e, 0xa7, 0x4e, 0xd0, 0x0c, 0x12, 0x7e, 0x02, 0x8a, 0x6c, 0x32, 0x46, 0x57, 0x44, 0x68, 0x99, + 0x83, 0xe2, 0x88, 0x58, 0x53, 0x53, 0x34, 0x56, 0x49, 0x3c, 0x42, 0x63, 0x1a, 0xbe, 0x78, 0x30, + 0xe6, 0xc8, 0x8c, 0xed, 0xf5, 0x18, 0xf6, 0xef, 0xda, 0x9e, 0x4d, 0x4f, 0xb0, 0x55, 0x2d, 0x8a, + 0x74, 0x89, 0xc5, 0xa3, 0xd3, 0x69, 0x65, 0x41, 0x50, 0x9e, 0x2f, 0x6c, 0x81, 0x8d, 0xa4, 0xb4, + 0x87, 0xc4, 0xc2, 0xd5, 0x92, 0x78, 0x18, 0xb7, 0xf8, 0xaf, 0xdc, 0x9f, 0x3a, 0x39, 0x9b, 0xb3, + 0xa0, 0x19, 0xdf, 0xf4, 0xa2, 0x01, 0xf2, 0x17, 0x8d, 0xc6, 0xf7, 0x05, 0x50, 0x4a, 0x66, 0xea, + 0x31, 0x00, 0xe6, 0xa4, 0x71, 0x51, 0x39, 0x57, 0x6f, 0xe6, 0x3d, 0x82, 0xb8, 0xc5, 0x25, 0xf3, + 0x20, 0x36, 0x51, 0x94, 0x22, 0x82, 0x9f, 0x82, 0x92, 0xd8, 0xb6, 0x44, 0x0b, 0x5a, 0x7c, 0xe6, + 0x16, 0xb4, 0x1e, 0x06, 0x6a, 0xa9, 0x3d, 0x21, 0x40, 0x09, 0x17, 0xec, 0xa5, 0x53, 0xf6, 0x9c, + 0xed, 0x14, 0x4e, 0xa7, 0x57, 0x5c, 0x31, 0xc3, 0xca, 0x9b, 0x9a, 0xdc, 0x35, 0x96, 0x45, 0x81, + 0xf3, 0xd6, 0x88, 0x26, 0x28, 0x89, 0xbd, 0x08, 0x5b, 0xd8, 0x12, 0x1a, 0x2d, 0xe8, 0x97, 0x25, + 0xb4, 0xd4, 0x9e, 0x1c, 0xa0, 0x04, 0xc3, 0x89, 
0xa3, 0x85, 0x47, 0xae, 0x5d, 0x31, 0x71, 0xb4, + 0x1e, 0x21, 0x79, 0x0a, 0xef, 0x80, 0x8a, 0x0c, 0x09, 0x5b, 0xf7, 0x3c, 0x0b, 0x7f, 0x8d, 0xa9, + 0x78, 0x9a, 0x25, 0xbd, 0x2a, 0x3d, 0x2a, 0xfb, 0x33, 0xe7, 0x68, 0xce, 0x03, 0x7e, 0xab, 0x80, + 0xeb, 0x43, 0xcf, 0x24, 0x43, 0x8f, 0x61, 0xab, 0x83, 0x7d, 0xd7, 0xf6, 0xf8, 0x3f, 0x4f, 0x47, + 0xc4, 0xa2, 0x42, 0xb9, 0xe5, 0x9d, 0xdb, 0x99, 0xc5, 0x3e, 0xce, 0xf6, 0x89, 0x74, 0x9e, 0x73, + 0x88, 0xf2, 0x6e, 0x82, 0x2a, 0x28, 0xf8, 0xd8, 0xb0, 0xc6, 0x42, 0xde, 0x05, 0xbd, 0xc4, 0xdb, + 0x28, 0xe2, 0x06, 0x14, 0xd9, 0x1b, 0xbf, 0x28, 0xe0, 0xd2, 0xcc, 0x56, 0xfb, 0xdf, 0x5f, 0x5b, + 0x1a, 0xbf, 0x2a, 0x20, 0x2f, 0x17, 0xf0, 0x28, 0xad, 0x0b, 0xfe, 0xac, 0x4a, 0xfa, 0xce, 0x94, + 0x26, 0xce, 0x02, 0xf5, 0x66, 0xde, 0xff, 0xbc, 0x7c, 0x0b, 0xa1, 0xda, 0xf1, 0xbd, 0x3b, 0x69, + 0xe1, 0x7c, 0x18, 0x0b, 0x67, 0x51, 0xd0, 0x35, 0x13, 0xd1, 0x5c, 0x8c, 0x4b, 0xba, 0xeb, 0x5b, + 0x8f, 0x9e, 0xd6, 0x17, 0x1e, 0x3f, 0xad, 0x2f, 0x3c, 0x79, 0x5a, 0x5f, 0xf8, 0x26, 0xac, 0x2b, + 0x8f, 0xc2, 0xba, 0xf2, 0x38, 0xac, 0x2b, 0x4f, 0xc2, 0xba, 0xf2, 0x47, 0x58, 0x57, 0xbe, 0xfb, + 0xb3, 0xbe, 0xf0, 0xd9, 0xe2, 0x68, 0xfb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54, 0x31, 0x16, + 0xcf, 0xa2, 0x10, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -975,6 +976,11 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ready != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready)) + i-- + dAtA[i] = 0x48 + } if m.UncountedTerminatedPods != nil { { size, err := m.UncountedTerminatedPods.MarshalToSizedBuffer(dAtA[:i]) @@ -1341,6 +1347,9 @@ func (m *JobStatus) Size() (n int) { l = m.UncountedTerminatedPods.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ready != nil { + n += 1 + sovGenerated(uint64(*m.Ready)) + } return n } @@ -1525,6 +1534,7 @@ func (this *JobStatus) String() string { `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, `CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`, `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, `}`, }, "") return s @@ -3273,6 +3283,26 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 1aefe0526..161886029 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -227,8 +227,6 @@ message JobSpec { // guarantees (e.g. finalizers) will be honored. If this field is unset, // the Job won't be automatically deleted. If this field is set to zero, // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. 
// +optional optional int32 ttlSecondsAfterFinished = 8; @@ -300,7 +298,7 @@ message JobStatus { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - // The number of actively running pods. + // The number of pending and running pods. // +optional optional int32 active = 4; @@ -333,12 +331,20 @@ message JobStatus { // (3) Remove the pod UID from the arrays while increasing the corresponding // counter. // - // This field is alpha-level. The job controller only makes use of this field - // when the feature gate PodTrackingWithFinalizers is enabled. + // This field is beta-level. The job controller only makes use of this field + // when the feature gate JobTrackingWithFinalizers is enabled (enabled + // by default). // Old jobs might not be tracked using this field, in which case the field // remains null. // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; + + // The number of pods which have a Ready condition. + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (disabled by default). + // +optional + optional int32 ready = 9; } // JobTemplateSpec describes the data a Job should have when created from a template diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 77742e5f8..aec1cad8e 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -71,6 +71,7 @@ type JobList struct { } // CompletionMode specifies how Pod completions of a Job are tracked. +// +enum type CompletionMode string const ( @@ -154,8 +155,6 @@ type JobSpec struct { // guarantees (e.g. finalizers) will be honored. If this field is unset, // the Job won't be automatically deleted. If this field is set to zero, // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` @@ -227,7 +226,7 @@ type JobStatus struct { // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - // The number of actively running pods. + // The number of pending and running pods. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` @@ -260,12 +259,20 @@ type JobStatus struct { // (3) Remove the pod UID from the arrays while increasing the corresponding // counter. // - // This field is alpha-level. The job controller only makes use of this field - // when the feature gate PodTrackingWithFinalizers is enabled. + // This field is beta-level. The job controller only makes use of this field + // when the feature gate JobTrackingWithFinalizers is enabled (enabled + // by default). // Old jobs might not be tracked using this field, in which case the field // remains null. // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` + + // The number of pods which have a Ready condition. + // + // This field is alpha-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (disabled by default). 
+ // +optional + Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't @@ -282,6 +289,7 @@ type UncountedTerminatedPods struct { Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// +enum type JobConditionType string // These are valid conditions of a job. @@ -406,6 +414,7 @@ type CronJobSpec struct { // Only one of the following concurrent policies may be specified. // If none of the following policies is specified, the default one // is AllowConcurrent. +// +enum type ConcurrencyPolicy string const ( diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index f6e01000d..269021a9c 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -118,7 +118,7 @@ var map_JobSpec = map[string]string{ "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.", + "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. 
Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.", "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).", } @@ -132,11 +132,12 @@ var map_JobStatus = map[string]string{ "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", - "active": "The number of actively running pods.", + "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. 
When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is alpha-level. The job controller only makes use of this field when the feature gate PodTrackingWithFinalizers is enabled. Old jobs might not be tracked using this field, in which case the field remains null.", + "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.", + "ready": "The number of pods which have a Ready condition.\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobReadyPods is enabled (disabled by default).", } func (JobStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index fe60aaf37..a9806a502 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -318,6 +319,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { *out = new(UncountedTerminatedPods) (*in).DeepCopyInto(*out) } + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(int32) + **out = **in + } return } diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 77244485b..8c256848d 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 5379f896c..2836b3b01 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index 4bfd1fb61..a3900e1dc 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -214,6 +214,7 @@ type CertificateSigningRequestStatus struct { } // RequestConditionType is the type of a CertificateSigningRequestCondition +// +enum type RequestConditionType string // Well-known condition types for certificate requests. @@ -278,6 +279,7 @@ type CertificateSigningRequestList struct { // KeyUsage specifies valid usage contexts for keys. 
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +enum type KeyUsage string // Valid key usages diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go index af591b46f..aefa60411 100644 --- a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 48e460aac..a315e2ac6 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go index 6ccebfe6f..480a32936 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 2dd7eddbc..99f6b0be7 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index de6962137..3adfd8720 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go index 9d18b9225..18926aa10 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 7fde09126..eb9517e1d 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -121,7 +121,7 @@ const ( EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" // EndpointsOverCapacity will be set on an Endpoints resource when it - // exceeds the maximum capacity of 1000 addresses. Inititially the Endpoints + // exceeds the maximum capacity of 1000 addresses. Initially the Endpoints // controller will set this annotation with a value of "warning". 
In a // future release, the controller may set this annotation with a value of // "truncated" to indicate that any addresses exceeding the limit of 1000 diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 259444371..0418699e6 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -1729,10 +1729,38 @@ func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo +func (m *GRPCAction) Reset() { *m = GRPCAction{} } +func (*GRPCAction) ProtoMessage() {} +func (*GRPCAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GRPCAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GRPCAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GRPCAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_GRPCAction.Merge(m, src) +} +func (m *GRPCAction) XXX_Size() int { + return m.Size() +} +func (m *GRPCAction) XXX_DiscardUnknown() { + xxx_messageInfo_GRPCAction.DiscardUnknown(m) +} + +var xxx_messageInfo_GRPCAction proto.InternalMessageInfo + func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1869,34 +1897,6 @@ func (m 
*HTTPHeader) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} -} -func (m *Handler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Handler) XXX_Merge(src proto.Message) { - xxx_messageInfo_Handler.Merge(m, src) -} -func (m *Handler) XXX_Size() int { - return m.Size() -} -func (m *Handler) XXX_DiscardUnknown() { - xxx_messageInfo_Handler.DiscardUnknown(m) -} - -var xxx_messageInfo_Handler proto.InternalMessageInfo - func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { @@ -2065,10 +2065,38 @@ func (m *Lifecycle) XXX_DiscardUnknown() { var xxx_messageInfo_Lifecycle proto.InternalMessageInfo +func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } +func (*LifecycleHandler) ProtoMessage() {} +func (*LifecycleHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LifecycleHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LifecycleHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleHandler.Merge(m, src) +} +func (m *LifecycleHandler) XXX_Size() int { + return m.Size() +} +func (m *LifecycleHandler) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo + func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
@@ -2180,7 +2208,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2376,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2404,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2432,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = 
NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2460,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{86} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2488,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2516,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2544,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2572,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2600,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2628,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2656,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeConfigStatus) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2684,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2712,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2740,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2768,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2796,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2880,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m 
*NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +2992,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3020,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3048,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3076,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3104,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3132,7 @@ var 
xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = 
PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3384,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3412,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3440,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3468,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3496,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3524,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3552,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3580,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3608,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3636,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3664,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3692,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3689,10 +3717,38 @@ func (m *PodLogOptions) XXX_DiscardUnknown() { var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo +func (m *PodOS) Reset() { *m = PodOS{} } +func (*PodOS) ProtoMessage() {} +func (*PodOS) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodOS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodOS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodOS) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodOS.Merge(m, src) +} +func (m *PodOS) XXX_Size() int { + return m.Size() +} +func (m *PodOS) XXX_DiscardUnknown() { + xxx_messageInfo_PodOS.DiscardUnknown(m) +} + +var xxx_messageInfo_PodOS proto.InternalMessageInfo + func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3776,7 @@ var 
xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3804,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3832,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3860,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3888,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3916,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3944,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3972,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4000,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4028,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4056,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4084,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4112,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4140,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4168,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4196,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4165,10 +4221,38 @@ func (m *Probe) XXX_DiscardUnknown() { var xxx_messageInfo_Probe proto.InternalMessageInfo +func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } +func (*ProbeHandler) ProtoMessage() {} +func (*ProbeHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m 
*ProbeHandler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProbeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProbeHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeHandler.Merge(m, src) +} +func (m *ProbeHandler) XXX_Size() int { + return m.Size() +} +func (m *ProbeHandler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo + func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4280,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4308,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4336,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4364,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4392,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4420,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4476,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4504,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4532,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4560,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4588,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4616,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4644,7 @@ var xxx_messageInfo_ResourceQuotaSpec 
proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4672,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4700,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4728,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4756,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4784,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4812,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4840,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func 
(m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4868,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4896,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4924,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4952,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4980,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5008,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5036,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5064,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5092,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func 
(m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5120,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5148,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5176,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5204,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5232,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5260,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5288,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5316,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func 
(*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5344,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5372,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5400,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5428,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5456,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5484,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5512,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5540,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5568,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5596,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5624,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5652,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5680,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5708,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5736,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5764,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5792,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5820,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5848,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5876,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5904,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5912,18 +5996,19 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GRPCAction)(nil), "k8s.io.api.core.v1.GRPCAction") proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") proto.RegisterType((*HostAlias)(nil), 
"k8s.io.api.core.v1.HostAlias") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler") proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") @@ -5971,6 +6056,7 @@ func init() { proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate") proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") @@ -5992,6 +6078,7 @@ func init() { proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodOS)(nil), "k8s.io.api.core.v1.PodOS") proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") @@ -6011,6 +6098,7 @@ func init() { proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProbeHandler)(nil), "k8s.io.api.core.v1.ProbeHandler") proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") @@ -6087,887 +6175,897 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14068 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x5c, 0xd9, - 0x79, 0x18, 0xaa, 0xdb, 0x8d, 0xad, 0x3f, 0xec, 0x07, 0x24, 0x07, 0xc4, 0x0c, 0x09, 0xce, 0xa5, - 0xc4, 0xe1, 0x68, 0x66, 0x40, 0x71, 0x16, 0x69, 0x3c, 0x23, 0x8d, 0x05, 0xa0, 0x01, 0xb2, 0x87, - 0x04, 0xd8, 0x73, 0x1a, 0x24, 0x25, 0x79, 0xa4, 0xd2, 0x45, 0xf7, 0x01, 0x70, 0x85, 0xee, 0x7b, - 0x7b, 0xee, 0xbd, 0x0d, 0x12, 0xf3, 0xe4, 0x7a, 0x7e, 0xf2, 0x2a, 0x2f, 0xaf, 0x54, 0xaf, 0xfc, - 0xb2, 0xd8, 0x2e, 0x57, 0xca, 0x71, 0xca, 0x56, 0x94, 0xcd, 0xb1, 0x63, 0x3b, 0x96, 0x13, 0x3b, - 0xbb, 0x93, 0x1f, 
0xce, 0x44, 0x24, 0x68, 0xb9, 0x9e, 0x43, 0x0f, 0x03, 0xf1, 0xb4, 0x62, 0x8a, - 0xcc, 0x71, 0xb6, 0xdb, 0xce, 0x8b, 0xd9, 0x38, 0xb3, 0x91, 0x89, 0x85, 0x73, 0x6a, 0xa3, 0xfb, - 0x30, 0x9b, 0x01, 0xf1, 0x9b, 0x6e, 0x7d, 0x7f, 0xf6, 0x14, 0xa3, 0xfc, 0x51, 0x41, 0x79, 0x76, - 0x23, 0x07, 0xef, 0xb0, 0x0b, 0x0c, 0xe7, 0x52, 0x47, 0xb7, 0x60, 0x92, 0x9d, 0x40, 0xd5, 0x4e, - 0xb3, 0x29, 0x1a, 0x9c, 0x60, 0x0d, 0x7e, 0x40, 0xde, 0xc7, 0x15, 0x13, 0x7c, 0x78, 0x30, 0x0f, - 0xf1, 0x3f, 0x9c, 0xac, 0x8d, 0x36, 0x99, 0xce, 0xac, 0x13, 0xb8, 0xd1, 0x3e, 0x3d, 0x37, 0xc8, - 0xfd, 0x68, 0x76, 0xb2, 0xab, 0xbc, 0x42, 0x47, 0x55, 0x8a, 0x35, 0xbd, 0x10, 0x27, 0x09, 0xd2, - 0x23, 0x35, 0x8c, 0x1a, 0xae, 0x37, 0x3b, 0xc5, 0xdf, 0x25, 0xf2, 0x44, 0xaa, 0xd1, 0x42, 0xcc, - 0x61, 0x4c, 0x5f, 0x46, 0x7f, 0xdc, 0xa2, 0x37, 0xd7, 0x34, 0x43, 0x8c, 0xf5, 0x65, 0x12, 0x80, - 0x63, 0x1c, 0xca, 0x4c, 0x46, 0xd1, 0xfe, 0x2c, 0x62, 0xa8, 0xea, 0x60, 0xd9, 0xd8, 0xf8, 0x24, - 0xa6, 0xe5, 0xf6, 0x26, 0x4c, 0xa8, 0x83, 0x90, 0x8d, 0x09, 0x9a, 0x87, 0x41, 0xc6, 0x3e, 0x09, - 0xe9, 0x5a, 0x89, 0x76, 0x81, 0xb1, 0x56, 0x98, 0x97, 0xb3, 0x2e, 0xb8, 0x6f, 0x93, 0xa5, 0xfd, - 0x88, 0xf0, 0x37, 0x7d, 0x51, 0xeb, 0x82, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0xdf, 0x9c, 0x0d, 0x8d, - 0x4f, 0xdb, 0x3e, 0xee, 0x97, 0x67, 0x61, 0x64, 0xc7, 0x0f, 0x23, 0x8a, 0xcd, 0xda, 0x18, 0x8c, - 0x19, 0xcf, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0xd7, 0xf5, 0x06, 0xc4, 0xe5, 0xa8, - 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, 0xa2, 0x97, 0x61, 0x84, 0xd9, 0x80, 0xd4, 0xfd, 0xa6, 0xe0, - 0xda, 0xe4, 0x0d, 0x3f, 0x52, 0x15, 0xe5, 0x87, 0xda, 0x6f, 0xac, 0xb0, 0xd1, 0x25, 0x18, 0xa2, - 0x5d, 0xa8, 0x54, 0xc5, 0xb5, 0xa4, 0x04, 0x45, 0xd7, 0x59, 0x29, 0x16, 0x50, 0xfb, 0xff, 0x2b, - 0x68, 0xa3, 0x4c, 0xdf, 0xc3, 0x04, 0x55, 0x61, 0xf8, 0x9e, 0xe3, 0x46, 0xae, 0xb7, 0x2d, 0xf8, - 0x8f, 0xa7, 0xbb, 0xde, 0x51, 0xac, 0xd2, 0x5d, 0x5e, 0x81, 0xdf, 0xa2, 0xe2, 0x0f, 0x96, 0x64, - 0x28, 0xc5, 0xa0, 0xe3, 0x79, 0x94, 0x62, 0xa1, 0x5f, 0x8a, 0x98, 0x57, 0xe0, 0x14, 0xc5, 0x1f, - 0x2c, 0xc9, 0xa0, 0x37, 0x01, 0xe4, 0x0e, 0x23, 0x0d, 0x61, 0x7b, 0xf1, 0x6c, 0x6f, 0xa2, 0x1b, - 0xaa, 0xce, 0xd2, 0x04, 0xbd, 0xa3, 0xe3, 0xff, 0x58, 0xa3, 0x67, 0x47, 0x8c, 0x4f, 0x4b, 0x77, - 0x06, 0x7d, 0x07, 0x5d, 0xe2, 0x4e, 0x10, 0x91, 0xc6, 0x62, 0x24, 0x06, 0xe7, 0x83, 0xfd, 0x3d, - 0x52, 0x36, 0xdc, 0x16, 0xd1, 0xb7, 0x83, 0x20, 0x82, 0x63, 0x7a, 0xf6, 0x2f, 0x15, 0x61, 0x36, - 0xaf, 0xbb, 0x74, 0xd1, 0x91, 0xfb, 0x6e, 0xb4, 0x4c, 0xd9, 0x2b, 0xcb, 0x5c, 0x74, 0x2b, 0xa2, - 0x1c, 0x2b, 0x0c, 0x3a, 0xfb, 0xa1, 0xbb, 0x2d, 0xdf, 0x98, 0x83, 0xf1, 0xec, 0xd7, 0x58, 0x29, - 0x16, 0x50, 0x8a, 0x17, 0x10, 0x27, 0x14, 0xc6, 0x3d, 0xda, 0x2a, 0xc1, 0xac, 0x14, 0x0b, 0xa8, - 0x2e, 0xed, 0x1a, 0xe8, 0x21, 0xed, 0x32, 0x86, 0x68, 0xf0, 0x78, 0x87, 0x08, 0x7d, 0x06, 0x60, - 0xcb, 0xf5, 0xdc, 0x70, 0x87, 0x51, 0x1f, 0x3a, 0x32, 0x75, 0xc5, 0x9c, 0xad, 0x2a, 0x2a, 0x58, - 0xa3, 0x88, 0x5e, 0x82, 0x51, 0xb5, 0x01, 0x2b, 0x65, 0xa6, 0xe9, 0xd4, 0x2c, 0x47, 0xe2, 0xd3, - 0xa8, 0x8c, 0x75, 0x3c, 0xfb, 0x73, 0xc9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, - 0x5b, 0xe8, 0x3e, 0xbe, 0xf6, 0xd7, 0x8b, 0x30, 0x69, 0x34, 0xd6, 0x09, 0xfb, 0x38, 0xb3, 0xae, - 0xd1, 0x03, 0xdc, 0x89, 0x88, 0xd8, 0x7f, 0x76, 0xef, 0xad, 0xa2, 0x1f, 0xf2, 0x74, 0x07, 0xf0, - 0xfa, 0xe8, 0x33, 0x50, 0x6a, 0x3a, 0x21, 0x93, 0x9c, 0x11, 0xb1, 0xef, 0xfa, 0x21, 0x16, 0x3f, - 0x4c, 0x9c, 0x30, 0xd2, 0x6e, 0x4d, 0x4e, 0x3b, 0x26, 0x49, 0x6f, 0x1a, 0xca, 0x9f, 0x48, 0xeb, - 0x31, 0xd5, 0x09, 0xca, 0xc4, 0xec, 0x63, 
0x0e, 0x43, 0x2f, 0xc3, 0x58, 0x40, 0xd8, 0xaa, 0x58, - 0xa6, 0xdc, 0x1c, 0x5b, 0x66, 0x83, 0x31, 0xdb, 0x87, 0x35, 0x18, 0x36, 0x30, 0xe3, 0xb7, 0xc1, - 0x50, 0x97, 0xb7, 0xc1, 0xd3, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xc2, 0x8b, 0xb1, - 0x84, 0x27, 0x17, 0xcc, 0x48, 0x7f, 0x0b, 0x86, 0xbe, 0x3e, 0xc4, 0xa2, 0x66, 0x5a, 0xe6, 0x11, - 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0xcc, 0xfe, 0x20, 0x4c, 0x94, 0x1d, 0xd2, 0xf2, 0xbd, 0x15, - 0xaf, 0xd1, 0xf6, 0x5d, 0x2f, 0x42, 0xb3, 0x30, 0xc0, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd0, 0x86, - 0x30, 0x2b, 0xb1, 0xb7, 0xe1, 0x74, 0xd9, 0xbf, 0xe7, 0xdd, 0x73, 0x82, 0xc6, 0x62, 0xb5, 0xa2, - 0xbd, 0xaf, 0xd7, 0xe5, 0xfb, 0x8e, 0x1b, 0x6d, 0x65, 0x1e, 0xbd, 0x5a, 0x4d, 0xce, 0xd6, 0xae, - 0xba, 0x4d, 0x92, 0x23, 0x05, 0xf9, 0xcb, 0x05, 0xa3, 0xa5, 0x18, 0x5f, 0x69, 0xb5, 0xac, 0x5c, - 0xad, 0xd6, 0x1b, 0x30, 0xb2, 0xe5, 0x92, 0x66, 0x03, 0x93, 0x2d, 0xb1, 0x12, 0x9f, 0xca, 0xb7, - 0x43, 0x59, 0xa5, 0x98, 0x52, 0xea, 0xc5, 0x5f, 0x87, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e, - 0x4c, 0xc9, 0x07, 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x74, 0xb7, 0x57, 0x88, 0x49, 0xfc, 0xd4, 0x83, - 0x83, 0xf9, 0x29, 0x9c, 0x20, 0x83, 0x53, 0x84, 0xe9, 0x73, 0xb0, 0x45, 0x4f, 0xe0, 0x01, 0x36, - 0xfc, 0xec, 0x39, 0xc8, 0x5e, 0xb6, 0xac, 0xd4, 0xfe, 0x09, 0x0b, 0x1e, 0x4b, 0x8d, 0x8c, 0x78, - 0xe1, 0x1f, 0xf3, 0x2c, 0x24, 0x5f, 0xdc, 0x85, 0xde, 0x2f, 0x6e, 0xfb, 0x6f, 0x59, 0x70, 0x6a, - 0xa5, 0xd5, 0x8e, 0xf6, 0xcb, 0xae, 0xa9, 0x82, 0xfa, 0x08, 0x0c, 0xb5, 0x48, 0xc3, 0xed, 0xb4, - 0xc4, 0xcc, 0xcd, 0xcb, 0x53, 0x6a, 0x8d, 0x95, 0x1e, 0x1e, 0xcc, 0x8f, 0xd7, 0x22, 0x3f, 0x70, - 0xb6, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x9d, 0xf5, 0xee, 0xdb, 0xe4, 0xa6, 0xdb, 0x72, 0xa5, 0x5d, - 0x51, 0x57, 0x99, 0xdd, 0x82, 0x1c, 0xd0, 0x85, 0x37, 0x3a, 0x8e, 0x17, 0xb9, 0xd1, 0xbe, 0xd0, - 0x1e, 0x49, 0x22, 0x38, 0xa6, 0x67, 0x7f, 0xcd, 0x82, 0x49, 0xb9, 0xee, 0x17, 0x1b, 0x8d, 0x80, - 0x84, 0x21, 0x9a, 0x83, 0x82, 0xdb, 0x16, 0xbd, 0x04, 0xd1, 0xcb, 0x42, 0xa5, 0x8a, 0x0b, 0x6e, - 0x5b, 0xb2, 0x65, 0xec, 0x20, 0x2c, 0x9a, 0x8a, 0xb4, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x19, - 0x46, 0x3c, 0xbf, 0xc1, 0x6d, 0xbb, 0xf8, 0x95, 0xc6, 0x16, 0xd8, 0xba, 0x28, 0xc3, 0x0a, 0x8a, - 0xaa, 0x50, 0xe2, 0x66, 0x4f, 0xf1, 0xa2, 0xed, 0xcb, 0x78, 0x8a, 0x7d, 0xd9, 0x86, 0xac, 0x89, - 0x63, 0x22, 0xf6, 0xaf, 0x59, 0x30, 0x26, 0xbf, 0xac, 0x4f, 0x9e, 0x93, 0x6e, 0xad, 0x98, 0xdf, - 0x8c, 0xb7, 0x16, 0xe5, 0x19, 0x19, 0xc4, 0x60, 0x15, 0x8b, 0x47, 0x62, 0x15, 0xaf, 0xc2, 0xa8, - 0xd3, 0x6e, 0x57, 0x4d, 0x3e, 0x93, 0x2d, 0xa5, 0xc5, 0xb8, 0x18, 0xeb, 0x38, 0xf6, 0x8f, 0x17, - 0x60, 0x42, 0x7e, 0x41, 0xad, 0xb3, 0x19, 0x92, 0x08, 0x6d, 0x40, 0xc9, 0xe1, 0xb3, 0x44, 0xe4, - 0x22, 0xbf, 0x98, 0x2d, 0x47, 0x30, 0xa6, 0x34, 0xbe, 0xf0, 0x17, 0x65, 0x6d, 0x1c, 0x13, 0x42, - 0x4d, 0x98, 0xf6, 0xfc, 0x88, 0x1d, 0xfe, 0x0a, 0xde, 0x4d, 0xb5, 0x93, 0xa4, 0x7e, 0x56, 0x50, - 0x9f, 0x5e, 0x4f, 0x52, 0xc1, 0x69, 0xc2, 0x68, 0x45, 0xca, 0x66, 0x8a, 0xf9, 0xc2, 0x00, 0x7d, - 0xe2, 0xb2, 0x45, 0x33, 0xf6, 0xaf, 0x5a, 0x50, 0x92, 0x68, 0x27, 0xa1, 0xc5, 0x5b, 0x83, 0xe1, - 0x90, 0x4d, 0x82, 0x1c, 0x1a, 0xbb, 0x5b, 0xc7, 0xf9, 0x7c, 0xc5, 0x77, 0x1a, 0xff, 0x1f, 0x62, - 0x49, 0x83, 0x89, 0xe6, 0x55, 0xf7, 0xdf, 0x25, 0xa2, 0x79, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x8f, - 0x58, 0x9f, 0x35, 0x59, 0x17, 0x65, 0xbd, 0xda, 0x01, 0xd9, 0x72, 0xef, 0x27, 0x59, 0xaf, 0x2a, - 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0xb1, 0xba, 0x94, 0xc9, 0xc6, 0x3b, 0xfc, 0x52, 0x57, 0xfd, - 0x80, 0x52, 0x25, 0x71, 0x59, 0xc8, 0xb2, 0x56, 0x1f, 0x1b, 0xd4, 
0x4c, 0x33, 0x82, 0x62, 0x2f, - 0x33, 0x82, 0x98, 0x6e, 0xbe, 0x52, 0xfd, 0x27, 0x2d, 0x18, 0xe2, 0xb2, 0xb8, 0xfe, 0x44, 0xa1, - 0x9a, 0x66, 0x2d, 0x1e, 0xbb, 0x3b, 0xb4, 0x50, 0x68, 0xca, 0xd0, 0x1a, 0x94, 0xd8, 0x0f, 0x26, - 0x4b, 0x2c, 0xe6, 0x5b, 0xdd, 0xf3, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x4c, 0xc1, 0xfe, - 0xd1, 0x22, 0x3d, 0xdd, 0x62, 0x54, 0xe3, 0xd2, 0xb7, 0x1e, 0xdd, 0xa5, 0x5f, 0x78, 0x54, 0x97, - 0xfe, 0x36, 0x4c, 0xd6, 0x35, 0x3d, 0x5c, 0x3c, 0x93, 0x97, 0xbb, 0x2e, 0x12, 0x4d, 0x65, 0xc7, - 0xa5, 0x2c, 0xcb, 0x26, 0x11, 0x9c, 0xa4, 0x8a, 0xbe, 0x03, 0xc6, 0xf8, 0x3c, 0x8b, 0x56, 0xb8, - 0x25, 0xc6, 0x07, 0xf2, 0xd7, 0x8b, 0xde, 0x04, 0x97, 0xca, 0x69, 0xd5, 0xb1, 0x41, 0xcc, 0xfe, - 0x53, 0x0b, 0xd0, 0x4a, 0x7b, 0x87, 0xb4, 0x48, 0xe0, 0x34, 0x63, 0x71, 0xfa, 0x0f, 0x5a, 0x30, - 0x4b, 0x52, 0xc5, 0xcb, 0x7e, 0xab, 0x25, 0x1e, 0x2d, 0x39, 0xef, 0xea, 0x95, 0x9c, 0x3a, 0xca, - 0x2d, 0x61, 0x36, 0x0f, 0x03, 0xe7, 0xb6, 0x87, 0xd6, 0x60, 0x86, 0xdf, 0x92, 0x0a, 0xa0, 0xd9, - 0x5e, 0x3f, 0x2e, 0x08, 0xcf, 0x6c, 0xa4, 0x51, 0x70, 0x56, 0x3d, 0xfb, 0x7b, 0xc6, 0x20, 0xb7, - 0x17, 0xef, 0xe9, 0x11, 0xde, 0xd3, 0x23, 0xbc, 0xa7, 0x47, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, - 0xe1, 0x5b, 0x5e, 0x8f, 0xf0, 0xff, 0x5b, 0x70, 0x5a, 0x5d, 0x03, 0xc6, 0xc3, 0xf7, 0xf3, 0x30, - 0xc3, 0xb7, 0xdb, 0x72, 0xd3, 0x71, 0x5b, 0x1b, 0xa4, 0xd5, 0x6e, 0x3a, 0x91, 0xd4, 0xba, 0x5f, - 0xcd, 0x5c, 0xb9, 0x09, 0x8b, 0x55, 0xa3, 0xe2, 0xd2, 0x63, 0xf4, 0x7a, 0xca, 0x00, 0xe0, 0xac, - 0x66, 0xec, 0x5f, 0x1a, 0x81, 0xc1, 0x95, 0x3d, 0xe2, 0x45, 0x27, 0xf0, 0x44, 0xa8, 0xc3, 0x84, - 0xeb, 0xed, 0xf9, 0xcd, 0x3d, 0xd2, 0xe0, 0xf0, 0xa3, 0xbc, 0x64, 0xcf, 0x08, 0xd2, 0x13, 0x15, - 0x83, 0x04, 0x4e, 0x90, 0x7c, 0x14, 0xd2, 0xe4, 0x6b, 0x30, 0xc4, 0x0f, 0x71, 0x21, 0x4a, 0xce, - 0x3c, 0xb3, 0xd9, 0x20, 0x8a, 0xab, 0x29, 0x96, 0x74, 0xf3, 0x4b, 0x42, 0x54, 0x47, 0x9f, 0x83, - 0x89, 0x2d, 0x37, 0x08, 0xa3, 0x0d, 0xb7, 0x45, 0xc2, 0xc8, 0x69, 0xb5, 0x1f, 0x42, 0x7a, 0xac, - 0xc6, 0x61, 0xd5, 0xa0, 0x84, 0x13, 0x94, 0xd1, 0x36, 0x8c, 0x37, 0x1d, 0xbd, 0xa9, 0xe1, 0x23, - 0x37, 0xa5, 0x6e, 0x87, 0x9b, 0x3a, 0x21, 0x6c, 0xd2, 0xa5, 0xdb, 0xa9, 0xce, 0x04, 0xa0, 0x23, - 0x4c, 0x2c, 0xa0, 0xb6, 0x13, 0x97, 0x7c, 0x72, 0x18, 0x65, 0x74, 0x98, 0x81, 0x6c, 0xc9, 0x64, - 0x74, 0x34, 0x33, 0xd8, 0xcf, 0x42, 0x89, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0x05, 0x73, 0xa5, 0xbf, - 0xbe, 0xae, 0xb9, 0xf5, 0xc0, 0x37, 0xe5, 0xf6, 0x2b, 0x92, 0x12, 0x8e, 0x89, 0xa2, 0x65, 0x18, - 0x0a, 0x49, 0xe0, 0x92, 0x50, 0x5c, 0x35, 0x5d, 0xa6, 0x91, 0xa1, 0x71, 0xdf, 0x12, 0xfe, 0x1b, - 0x8b, 0xaa, 0x74, 0x79, 0x39, 0x4c, 0xa4, 0xc9, 0x2e, 0x03, 0x6d, 0x79, 0x2d, 0xb2, 0x52, 0x2c, - 0xa0, 0xe8, 0x75, 0x18, 0x0e, 0x48, 0x93, 0x29, 0x86, 0xc6, 0xfb, 0x5f, 0xe4, 0x5c, 0xcf, 0xc4, - 0xeb, 0x61, 0x49, 0x00, 0xdd, 0x00, 0x14, 0x10, 0xca, 0x28, 0xb9, 0xde, 0xb6, 0x32, 0x1b, 0x15, - 0x07, 0xad, 0x62, 0x48, 0x71, 0x8c, 0x21, 0xdd, 0x7c, 0x70, 0x46, 0x35, 0x74, 0x0d, 0xa6, 0x55, - 0x69, 0xc5, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x93, 0x8c, 0x96, 0x92, 0x53, 0xe0, 0x24, 0x02, 0x4e, - 0xd7, 0xb1, 0xbf, 0x6c, 0x01, 0x1f, 0xe7, 0x13, 0x78, 0x9d, 0xbf, 0x66, 0xbe, 0xce, 0xcf, 0xe6, - 0xce, 0x5c, 0xce, 0xcb, 0xfc, 0xcb, 0x16, 0x8c, 0x6a, 0x33, 0x1b, 0xaf, 0x59, 0xab, 0xcb, 0x9a, - 0xed, 0xc0, 0x14, 0x5d, 0xe9, 0xb7, 0x36, 0x43, 0x12, 0xec, 0x91, 0x06, 0x5b, 0x98, 0x85, 0x87, - 0x5b, 0x98, 0xca, 0x44, 0xed, 0x66, 0x82, 0x20, 0x4e, 0x35, 0x61, 0x7f, 0x56, 0x76, 0x55, 0x59, - 0xf4, 0xd5, 0xd5, 0x9c, 0x27, 0x2c, 0xfa, 0xd4, 0xac, 0xe2, 0x18, 0x87, 0x6e, 0xb5, 0x1d, 
0x3f, - 0x8c, 0x92, 0x16, 0x7d, 0xd7, 0xfd, 0x30, 0xc2, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0x72, 0x9f, 0xd4, - 0xf9, 0x8a, 0xd5, 0x1f, 0x0f, 0x56, 0xfe, 0xe3, 0xc1, 0xfe, 0x6d, 0x0b, 0x26, 0x56, 0x97, 0x8d, - 0x9b, 0x6b, 0x01, 0x80, 0xbf, 0x78, 0xee, 0xde, 0x5d, 0x97, 0xea, 0x70, 0xae, 0xd1, 0x54, 0xa5, - 0x58, 0xc3, 0x40, 0x67, 0xa1, 0xd8, 0xec, 0x78, 0x42, 0x7c, 0x38, 0x4c, 0xaf, 0xc7, 0x9b, 0x1d, - 0x0f, 0xd3, 0x32, 0xcd, 0xa5, 0xa0, 0xd8, 0xb7, 0x4b, 0x41, 0x4f, 0xd7, 0x7e, 0x34, 0x0f, 0x83, - 0xf7, 0xee, 0xb9, 0x0d, 0xee, 0x40, 0x29, 0x54, 0xf5, 0x77, 0xef, 0x56, 0xca, 0x21, 0xe6, 0xe5, - 0xf6, 0x97, 0x8a, 0x30, 0xb7, 0xda, 0x24, 0xf7, 0xdf, 0xa1, 0x13, 0x69, 0xbf, 0x0e, 0x11, 0x47, - 0x13, 0xc4, 0x1c, 0xd5, 0xe9, 0xa5, 0xf7, 0x78, 0x6c, 0xc1, 0x30, 0x37, 0x68, 0x93, 0x2e, 0xa5, - 0xaf, 0x66, 0xb5, 0x9e, 0x3f, 0x20, 0x0b, 0xdc, 0x30, 0x4e, 0x78, 0xc4, 0xa9, 0x0b, 0x53, 0x94, - 0x62, 0x49, 0x7c, 0xee, 0x15, 0x18, 0xd3, 0x31, 0x8f, 0xe4, 0x7e, 0xf6, 0xff, 0x14, 0x61, 0x8a, - 0xf6, 0xe0, 0x91, 0x4e, 0xc4, 0xed, 0xf4, 0x44, 0x1c, 0xb7, 0x0b, 0x52, 0xef, 0xd9, 0x78, 0x33, - 0x39, 0x1b, 0x57, 0xf3, 0x66, 0xe3, 0xa4, 0xe7, 0xe0, 0xbb, 0x2d, 0x98, 0x59, 0x6d, 0xfa, 0xf5, - 0xdd, 0x84, 0x9b, 0xd0, 0x4b, 0x30, 0x4a, 0x8f, 0xe3, 0xd0, 0xf0, 0x60, 0x37, 0x62, 0x1a, 0x08, - 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0xb7, 0x6f, 0x57, 0xca, 0x59, 0xa1, 0x10, 0x04, 0x08, 0xeb, 0x78, - 0xf6, 0x6f, 0x5a, 0x70, 0xee, 0xda, 0xf2, 0x4a, 0xbc, 0x14, 0x53, 0xd1, 0x18, 0x2e, 0xc1, 0x50, - 0xbb, 0xa1, 0x75, 0x25, 0x16, 0xaf, 0x96, 0x59, 0x2f, 0x04, 0xf4, 0xdd, 0x12, 0x69, 0xe4, 0x67, - 0x2d, 0x98, 0xb9, 0xe6, 0x46, 0xf4, 0x76, 0x4d, 0xc6, 0x05, 0xa0, 0xd7, 0x6b, 0xe8, 0x46, 0x7e, - 0xb0, 0x9f, 0x8c, 0x0b, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, 0x97, 0x99, 0x52, 0x17, - 0x4c, 0x45, 0x13, 0x16, 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0x6b, 0xb8, 0x01, 0x93, 0xd1, 0xed, 0x8b, - 0x13, 0x56, 0x7d, 0x58, 0x59, 0x02, 0x70, 0x8c, 0x63, 0xff, 0xb1, 0x05, 0xf3, 0xd7, 0x9a, 0x9d, - 0x30, 0x22, 0xc1, 0x56, 0x98, 0x73, 0x3a, 0xbe, 0x00, 0x25, 0x22, 0x25, 0xe2, 0xa2, 0xd7, 0x8a, - 0x63, 0x54, 0xa2, 0x72, 0x1e, 0x9e, 0x40, 0xe1, 0xf5, 0xe1, 0x74, 0x78, 0x34, 0xaf, 0xb1, 0x55, - 0x40, 0x44, 0x6f, 0x4b, 0x8f, 0xd7, 0xc0, 0x1c, 0xbf, 0x57, 0x52, 0x50, 0x9c, 0x51, 0xc3, 0xfe, - 0x09, 0x0b, 0x4e, 0xab, 0x0f, 0x7e, 0xd7, 0x7d, 0xa6, 0xfd, 0xf3, 0x05, 0x18, 0xbf, 0xbe, 0xb1, - 0x51, 0xbd, 0x46, 0x22, 0x71, 0x6d, 0xf7, 0xd6, 0x73, 0x63, 0x4d, 0x5d, 0xd7, 0xed, 0x31, 0xd7, - 0x89, 0xdc, 0xe6, 0x02, 0x0f, 0xfb, 0xb3, 0x50, 0xf1, 0xa2, 0x5b, 0x41, 0x2d, 0x0a, 0x5c, 0x6f, - 0x3b, 0x53, 0xc1, 0x27, 0x99, 0x8b, 0x62, 0x1e, 0x73, 0x81, 0x5e, 0x80, 0x21, 0x16, 0x77, 0x48, - 0x4e, 0xc2, 0xe3, 0xea, 0x2d, 0xc4, 0x4a, 0x0f, 0x0f, 0xe6, 0x4b, 0xb7, 0x71, 0x85, 0xff, 0xc1, - 0x02, 0x15, 0xdd, 0x86, 0xd1, 0x9d, 0x28, 0x6a, 0x5f, 0x27, 0x4e, 0x83, 0x04, 0xf2, 0x38, 0x3c, - 0x9f, 0x75, 0x1c, 0xd2, 0x41, 0xe0, 0x68, 0xf1, 0x09, 0x12, 0x97, 0x85, 0x58, 0xa7, 0x63, 0xd7, - 0x00, 0x62, 0xd8, 0x31, 0x69, 0x2a, 0xec, 0x3f, 0xb4, 0x60, 0x98, 0x87, 0x80, 0x08, 0xd0, 0x47, - 0x61, 0x80, 0xdc, 0x27, 0x75, 0xc1, 0xf1, 0x66, 0x76, 0x38, 0xe6, 0xb4, 0xb8, 0xc4, 0x95, 0xfe, - 0xc7, 0xac, 0x16, 0xba, 0x0e, 0xc3, 0xb4, 0xb7, 0xd7, 0x54, 0x3c, 0x8c, 0x27, 0xf3, 0xbe, 0x58, - 0x4d, 0x3b, 0x67, 0xce, 0x44, 0x11, 0x96, 0xd5, 0x99, 0x7a, 0xb8, 0xde, 0xae, 0xd1, 0x13, 0x3b, - 0xea, 0xc6, 0x58, 0x6c, 0x2c, 0x57, 0x39, 0x92, 0xa0, 0xc6, 0xd5, 0xc3, 0xb2, 0x10, 0xc7, 0x44, - 0xec, 0x0d, 0x28, 0xd1, 0x49, 0x5d, 0x6c, 0xba, 0x4e, 0x77, 0x8d, 0xf7, 0x33, 0x50, 0x92, 0xfa, - 0xec, 0x50, 0xb8, 
0x7e, 0x33, 0xaa, 0x52, 0xdd, 0x1d, 0xe2, 0x18, 0x6e, 0x6f, 0xc1, 0x29, 0x66, - 0x9d, 0xe8, 0x44, 0x3b, 0xc6, 0x1e, 0xeb, 0xbd, 0x98, 0x9f, 0x15, 0x0f, 0x48, 0x3e, 0x33, 0xb3, - 0x9a, 0x77, 0xe5, 0x98, 0xa4, 0x18, 0x3f, 0x26, 0xed, 0xaf, 0x0f, 0xc0, 0xe3, 0x95, 0x5a, 0x7e, - 0x74, 0x90, 0x97, 0x61, 0x8c, 0xf3, 0xa5, 0x74, 0x69, 0x3b, 0x4d, 0xd1, 0xae, 0x12, 0xb5, 0x6e, - 0x68, 0x30, 0x6c, 0x60, 0xa2, 0x73, 0x50, 0x74, 0xdf, 0xf2, 0x92, 0xbe, 0x47, 0x95, 0x37, 0xd6, - 0x31, 0x2d, 0xa7, 0x60, 0xca, 0xe2, 0xf2, 0xbb, 0x43, 0x81, 0x15, 0x9b, 0xfb, 0x1a, 0x4c, 0xb8, - 0x61, 0x3d, 0x74, 0x2b, 0x1e, 0x3d, 0x67, 0xb4, 0x93, 0x4a, 0x09, 0x37, 0x68, 0xa7, 0x15, 0x14, - 0x27, 0xb0, 0xb5, 0x8b, 0x6c, 0xb0, 0x6f, 0x36, 0xb9, 0xa7, 0x2f, 0x34, 0x7d, 0x01, 0xb4, 0xd9, - 0xd7, 0x85, 0x4c, 0x66, 0x2e, 0x5e, 0x00, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0xe5, 0x58, 0xdf, - 0x71, 0xda, 0x8b, 0x9d, 0x68, 0xa7, 0xec, 0x86, 0x75, 0x7f, 0x8f, 0x04, 0xfb, 0xec, 0xd1, 0x3f, - 0x12, 0xbf, 0x1c, 0x15, 0x60, 0xf9, 0xfa, 0x62, 0x95, 0x62, 0xe2, 0x74, 0x1d, 0xb4, 0x08, 0x93, - 0xb2, 0xb0, 0x46, 0x42, 0x76, 0x85, 0x8d, 0x32, 0x32, 0xca, 0x1b, 0x48, 0x14, 0x2b, 0x22, 0x49, - 0x7c, 0x93, 0x93, 0x86, 0xe3, 0xe0, 0xa4, 0x3f, 0x02, 0xe3, 0xae, 0xe7, 0x46, 0xae, 0x13, 0xf9, - 0x5c, 0xe1, 0xc3, 0xdf, 0xf7, 0x4c, 0x92, 0x5d, 0xd1, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0xf3, 0x00, - 0x4c, 0xb3, 0x69, 0x7b, 0x6f, 0x85, 0x7d, 0x2b, 0xad, 0xb0, 0xdb, 0xe9, 0x15, 0x76, 0x1c, 0x4f, - 0x84, 0x87, 0x5e, 0x66, 0x9f, 0x83, 0x92, 0x72, 0x80, 0x92, 0x1e, 0x90, 0x56, 0x8e, 0x07, 0x64, - 0x6f, 0xee, 0x43, 0xda, 0x90, 0x15, 0x33, 0x6d, 0xc8, 0xfe, 0xaa, 0x05, 0xb1, 0x06, 0x03, 0x5d, - 0x87, 0x52, 0xdb, 0x67, 0xa6, 0x91, 0x81, 0xb4, 0x37, 0x7e, 0x3c, 0xf3, 0xa2, 0xe2, 0x97, 0x22, - 0xff, 0xf8, 0xaa, 0xac, 0x81, 0xe3, 0xca, 0x68, 0x09, 0x86, 0xdb, 0x01, 0xa9, 0x45, 0x2c, 0x48, - 0x48, 0x4f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0xbf, 0x60, 0x01, 0x70, 0x33, 0x2d, - 0xc7, 0xdb, 0x26, 0x27, 0x20, 0xb5, 0x2e, 0xc3, 0x40, 0xd8, 0x26, 0xf5, 0x6e, 0x46, 0xab, 0x71, - 0x7f, 0x6a, 0x6d, 0x52, 0x8f, 0x07, 0x9c, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xbd, 0x00, 0x13, 0x31, - 0x5a, 0x25, 0x22, 0x2d, 0xf4, 0x9c, 0x11, 0x34, 0xe0, 0x6c, 0x22, 0x68, 0x40, 0x89, 0x61, 0x6b, - 0x02, 0xd2, 0xcf, 0x41, 0xb1, 0xe5, 0xdc, 0x17, 0x12, 0xb0, 0x67, 0xba, 0x77, 0x83, 0xd2, 0x5f, - 0x58, 0x73, 0xee, 0xf3, 0x47, 0xe2, 0x33, 0x72, 0x81, 0xac, 0x39, 0xf7, 0x0f, 0xb9, 0x69, 0x2a, - 0x3b, 0xa4, 0x6e, 0xba, 0x61, 0xf4, 0x85, 0xff, 0x14, 0xff, 0x67, 0xcb, 0x8e, 0x36, 0xc2, 0xda, - 0x72, 0x3d, 0x61, 0x81, 0xd4, 0x57, 0x5b, 0xae, 0x97, 0x6c, 0xcb, 0xf5, 0xfa, 0x68, 0xcb, 0xf5, - 0xd0, 0xdb, 0x30, 0x2c, 0x0c, 0x04, 0x45, 0x90, 0x9e, 0x2b, 0x7d, 0xb4, 0x27, 0xec, 0x0b, 0x79, - 0x9b, 0x57, 0xe4, 0x23, 0x58, 0x94, 0xf6, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x92, 0x05, 0x13, 0xe2, - 0x37, 0x26, 0x6f, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xc3, 0xfd, 0xf7, 0x41, 0x54, 0xe4, 0x5d, - 0xf9, 0xb0, 0x3c, 0x66, 0x4d, 0x60, 0xcf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0x8e, 0x05, 0xa7, 0x5a, - 0xce, 0x7d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0x42, 0xd1, 0xfe, 0xd1, 0xfe, 0xa6, 0x3f, - 0x55, 0x9d, 0x77, 0x52, 0x6a, 0x03, 0x4f, 0x65, 0xa1, 0xf4, 0xec, 0x6a, 0x66, 0xbf, 0xe6, 0xb6, - 0x60, 0x44, 0xae, 0xb7, 0x0c, 0x51, 0x43, 0x59, 0x67, 0xac, 0x8f, 0x6c, 0x9f, 0xa9, 0x3b, 0xe3, - 0xd3, 0x76, 0xc4, 0x5a, 0x7b, 0xa4, 0xed, 0x7c, 0x0e, 0xc6, 0xf4, 0x35, 0xf6, 0x48, 0xdb, 0x7a, - 0x0b, 0x66, 0x32, 0xd6, 0xd2, 0x23, 0x6d, 0xf2, 0x1e, 0x9c, 0xcd, 0x5d, 0x1f, 0x8f, 0xb2, 0x61, - 0xfb, 0xe7, 0x2d, 0xfd, 0x1c, 0x3c, 0x01, 
0xd5, 0xc1, 0xb2, 0xa9, 0x3a, 0x38, 0xdf, 0x7d, 0xe7, - 0xe4, 0xe8, 0x0f, 0xde, 0xd4, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x3a, 0x0c, 0x35, 0x69, 0x89, 0x34, - 0x33, 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x97, 0x62, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, 0xb6, 0x60, - 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x5c, 0x2e, 0x69, 0x11, 0x3f, 0x78, 0x01, 0x3b, 0xf7, - 0x56, 0xee, 0x47, 0xc4, 0x0b, 0xd9, 0x53, 0x31, 0x73, 0x60, 0x7e, 0xda, 0x82, 0x99, 0x9b, 0xbe, - 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x27, 0x41, 0xc5, 0xdb, 0x3e, 0x92, 0x8d, 0x74, 0xa1, 0xa7, - 0x8d, 0xf4, 0xb2, 0x34, 0x31, 0x1a, 0xc8, 0x9f, 0x3f, 0xca, 0x48, 0x26, 0xc3, 0xa8, 0x18, 0xc6, - 0xb0, 0x3b, 0x80, 0xf4, 0x5e, 0x0a, 0x8f, 0x15, 0x0c, 0xc3, 0x2e, 0xef, 0xaf, 0x98, 0xc4, 0xa7, - 0xb2, 0x19, 0xbc, 0xd4, 0xe7, 0x69, 0xbe, 0x18, 0xbc, 0x00, 0x4b, 0x42, 0xf6, 0xcb, 0x90, 0xe9, - 0xf6, 0xde, 0x5b, 0xf8, 0x60, 0x7f, 0x12, 0xa6, 0x59, 0xcd, 0x23, 0x3e, 0x8c, 0xed, 0x84, 0x6c, - 0x33, 0x23, 0x20, 0x9e, 0xfd, 0x45, 0x0b, 0x26, 0xd7, 0x13, 0x71, 0xc2, 0x2e, 0x31, 0x6d, 0x68, - 0x86, 0x48, 0xbd, 0xc6, 0x4a, 0xb1, 0x80, 0x1e, 0xbb, 0x24, 0xeb, 0x2f, 0x2c, 0x88, 0x23, 0x51, - 0x9c, 0x00, 0xfb, 0xb6, 0x6c, 0xb0, 0x6f, 0x99, 0x12, 0x16, 0xd5, 0x9d, 0x3c, 0xee, 0x0d, 0xdd, - 0x50, 0x31, 0x9a, 0xba, 0x08, 0x57, 0x62, 0x32, 0x7c, 0x29, 0x4e, 0x98, 0x81, 0x9c, 0x64, 0xd4, - 0x26, 0xfb, 0x77, 0x0a, 0x80, 0x14, 0x6e, 0xdf, 0x31, 0xa4, 0xd2, 0x35, 0x8e, 0x27, 0x86, 0xd4, - 0x1e, 0x20, 0xa6, 0xcf, 0x0f, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0xbb, 0x3b, 0x9a, 0xb1, 0xc0, - 0x9c, 0x68, 0x12, 0xdd, 0x4c, 0x51, 0xc3, 0x19, 0x2d, 0x68, 0x76, 0x1a, 0x83, 0xfd, 0xda, 0x69, - 0x0c, 0xf5, 0xf0, 0x4a, 0xfb, 0x39, 0x0b, 0xc6, 0xd5, 0x30, 0xbd, 0x4b, 0x6c, 0xc6, 0x55, 0x7f, - 0x72, 0x0e, 0xd0, 0xaa, 0xd6, 0x65, 0x76, 0xb1, 0x7c, 0x3b, 0xf3, 0x2e, 0x74, 0x9a, 0xee, 0xdb, - 0x44, 0x45, 0xf0, 0x9b, 0x17, 0xde, 0x82, 0xa2, 0xf4, 0xf0, 0x60, 0x7e, 0x5c, 0xfd, 0xe3, 0x11, - 0x83, 0xe3, 0x2a, 0xf4, 0x48, 0x9e, 0x4c, 0x2c, 0x45, 0xf4, 0x12, 0x0c, 0xb6, 0x77, 0x9c, 0x90, - 0x24, 0x7c, 0x6b, 0x06, 0xab, 0xb4, 0xf0, 0xf0, 0x60, 0x7e, 0x42, 0x55, 0x60, 0x25, 0x98, 0x63, - 0xf7, 0x1f, 0x99, 0x2b, 0xbd, 0x38, 0x7b, 0x46, 0xe6, 0xfa, 0x53, 0x0b, 0x06, 0xd6, 0xfd, 0xc6, - 0x49, 0x1c, 0x01, 0xaf, 0x19, 0x47, 0xc0, 0x13, 0x79, 0xc1, 0xdc, 0x73, 0x77, 0xff, 0x6a, 0x62, - 0xf7, 0x9f, 0xcf, 0xa5, 0xd0, 0x7d, 0xe3, 0xb7, 0x60, 0x94, 0x85, 0x88, 0x17, 0x7e, 0x44, 0x2f, - 0x18, 0x1b, 0x7e, 0x3e, 0xb1, 0xe1, 0x27, 0x35, 0x54, 0x6d, 0xa7, 0x3f, 0x0d, 0xc3, 0xc2, 0x31, - 0x25, 0xe9, 0xa4, 0x29, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x59, 0x04, 0x23, 0x24, 0x3d, 0xfa, 0x55, - 0x0b, 0x16, 0x02, 0x6e, 0xb0, 0xda, 0x28, 0x77, 0x02, 0xd7, 0xdb, 0xae, 0xd5, 0x77, 0x48, 0xa3, - 0xd3, 0x74, 0xbd, 0xed, 0xca, 0xb6, 0xe7, 0xab, 0xe2, 0x95, 0xfb, 0xa4, 0xde, 0x61, 0x4a, 0xb0, - 0x1e, 0xf1, 0xef, 0x95, 0xe1, 0xf7, 0xf3, 0x0f, 0x0e, 0xe6, 0x17, 0xf0, 0x91, 0x68, 0xe3, 0x23, - 0xf6, 0x05, 0xfd, 0xa6, 0x05, 0x57, 0x78, 0xa4, 0xf6, 0xfe, 0xfb, 0xdf, 0xe5, 0xb5, 0x5c, 0x95, - 0xa4, 0x62, 0x22, 0x1b, 0x24, 0x68, 0x2d, 0x7d, 0x44, 0x0c, 0xe8, 0x95, 0xea, 0xd1, 0xda, 0xc2, - 0x47, 0xed, 0x9c, 0xfd, 0x4f, 0x8a, 0x30, 0x2e, 0x22, 0x38, 0x89, 0x3b, 0xe0, 0x25, 0x63, 0x49, - 0x3c, 0x99, 0x58, 0x12, 0xd3, 0x06, 0xf2, 0xf1, 0x1c, 0xff, 0x21, 0x4c, 0xd3, 0xc3, 0xf9, 0x3a, - 0x71, 0x82, 0x68, 0x93, 0x38, 0xdc, 0xfc, 0xaa, 0x78, 0xe4, 0xd3, 0x5f, 0x89, 0xe7, 0x6e, 0x26, - 0x89, 0xe1, 0x34, 0xfd, 0x6f, 0xa5, 0x3b, 0xc7, 0x83, 0xa9, 0x54, 0x10, 0xae, 0x4f, 0x41, 0x49, - 0x79, 0x55, 0x88, 0x43, 0xa7, 0x7b, 0x2c, 0xbb, 0x24, 0x05, 0x2e, 
0x42, 0x8b, 0x3d, 0x7a, 0x62, - 0x72, 0xf6, 0xdf, 0x2b, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x87, 0x11, 0x27, 0x0c, 0xdd, 0x6d, 0x8f, - 0x34, 0xc4, 0x8e, 0x7d, 0x7f, 0xde, 0x8e, 0x35, 0x9a, 0x61, 0x9e, 0x2d, 0x8b, 0xa2, 0x26, 0x56, - 0x34, 0xd0, 0x75, 0x6e, 0xe4, 0xb6, 0x27, 0xdf, 0x7b, 0xfd, 0x51, 0x03, 0x69, 0x06, 0xb7, 0x47, - 0xb0, 0xa8, 0x8f, 0x3e, 0xcd, 0xad, 0x10, 0x6f, 0x78, 0xfe, 0x3d, 0xef, 0x9a, 0xef, 0xcb, 0x28, - 0x09, 0xfd, 0x11, 0x9c, 0x96, 0xb6, 0x87, 0xaa, 0x3a, 0x36, 0xa9, 0xf5, 0x17, 0xd5, 0xf2, 0xf3, - 0x30, 0x43, 0x49, 0x9b, 0x4e, 0xcc, 0x21, 0x22, 0x30, 0x29, 0xc2, 0x83, 0xc9, 0x32, 0x31, 0x76, - 0x99, 0x4f, 0x39, 0xb3, 0x76, 0x2c, 0x47, 0xbe, 0x61, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0xcf, 0x58, - 0xc0, 0x1c, 0x3a, 0x4f, 0x80, 0x1f, 0xf9, 0x98, 0xc9, 0x8f, 0xcc, 0xe6, 0x0d, 0x72, 0x0e, 0x2b, - 0xf2, 0x22, 0x5f, 0x59, 0xd5, 0xc0, 0xbf, 0xbf, 0x2f, 0x4c, 0x47, 0x7a, 0xbf, 0x3f, 0xec, 0xff, - 0x65, 0xf1, 0x43, 0x4c, 0xf9, 0x3c, 0xa0, 0xef, 0x84, 0x91, 0xba, 0xd3, 0x76, 0xea, 0x3c, 0x7f, - 0x4a, 0xae, 0x44, 0xcf, 0xa8, 0xb4, 0xb0, 0x2c, 0x6a, 0x70, 0x09, 0x95, 0x0c, 0x33, 0x37, 0x22, - 0x8b, 0x7b, 0x4a, 0xa5, 0x54, 0x93, 0x73, 0xbb, 0x30, 0x6e, 0x10, 0x7b, 0xa4, 0xe2, 0x8c, 0xef, - 0xe4, 0x57, 0xac, 0x0a, 0x8b, 0xd8, 0x82, 0x69, 0x4f, 0xfb, 0x4f, 0x2f, 0x14, 0xf9, 0xb8, 0x7c, - 0x7f, 0xaf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, 0xaf, 0x68, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, - 0x82, 0xc7, 0x74, 0x44, 0xcd, 0x1d, 0xa5, 0x97, 0x8e, 0xa0, 0x0c, 0x23, 0x7e, 0x9b, 0x04, 0x4e, - 0xe4, 0x07, 0xe2, 0xd6, 0xb8, 0x2c, 0x07, 0xfd, 0x96, 0x28, 0x3f, 0x14, 0xd1, 0xc7, 0x25, 0x75, - 0x59, 0x8e, 0x55, 0x4d, 0xfa, 0xfa, 0x64, 0x83, 0x11, 0x0a, 0xc7, 0x23, 0x76, 0x06, 0x30, 0x75, - 0x79, 0x88, 0x05, 0xc4, 0xfe, 0xba, 0xc5, 0x17, 0x96, 0xde, 0x75, 0xf4, 0x16, 0x4c, 0xb5, 0x9c, - 0xa8, 0xbe, 0xb3, 0x72, 0xbf, 0x1d, 0x70, 0x8d, 0x8b, 0x1c, 0xa7, 0x67, 0x7a, 0x8d, 0x93, 0xf6, - 0x91, 0xb1, 0x61, 0xe5, 0x5a, 0x82, 0x18, 0x4e, 0x91, 0x47, 0x9b, 0x30, 0xca, 0xca, 0x98, 0x4f, - 0x5d, 0xd8, 0x8d, 0x35, 0xc8, 0x6b, 0x4d, 0x59, 0x1c, 0xac, 0xc5, 0x74, 0xb0, 0x4e, 0xd4, 0xfe, - 0x4a, 0x91, 0xef, 0x76, 0xc6, 0xca, 0x3f, 0x0d, 0xc3, 0x6d, 0xbf, 0xb1, 0x5c, 0x29, 0x63, 0x31, - 0x0b, 0xea, 0x1a, 0xa9, 0xf2, 0x62, 0x2c, 0xe1, 0xe8, 0x32, 0x8c, 0x88, 0x9f, 0x52, 0x43, 0xc6, - 0xce, 0x66, 0x81, 0x17, 0x62, 0x05, 0x45, 0xcf, 0x03, 0xb4, 0x03, 0x7f, 0xcf, 0x6d, 0xb0, 0x58, - 0x0f, 0x45, 0xd3, 0x58, 0xa8, 0xaa, 0x20, 0x58, 0xc3, 0x42, 0xaf, 0xc2, 0x78, 0xc7, 0x0b, 0x39, - 0x3b, 0xa2, 0x45, 0x76, 0x55, 0x66, 0x2c, 0xb7, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x22, 0x0c, 0x45, - 0x0e, 0x33, 0x7e, 0x19, 0xcc, 0x37, 0xbe, 0xdd, 0xa0, 0x18, 0x7a, 0xaa, 0x0e, 0x5a, 0x01, 0x8b, - 0x8a, 0xe8, 0x53, 0xd2, 0xbd, 0x95, 0x1f, 0xec, 0xc2, 0xea, 0xbd, 0xbf, 0x4b, 0x40, 0x73, 0x6e, - 0x15, 0xd6, 0xf4, 0x06, 0x2d, 0xf4, 0x0a, 0x00, 0xb9, 0x1f, 0x91, 0xc0, 0x73, 0x9a, 0xca, 0xb6, - 0x4c, 0xf1, 0x05, 0x65, 0x7f, 0xdd, 0x8f, 0x6e, 0x87, 0x64, 0x45, 0x61, 0x60, 0x0d, 0xdb, 0xfe, - 0xcd, 0x12, 0x40, 0xcc, 0xb7, 0xa3, 0xb7, 0x53, 0x07, 0xd7, 0xb3, 0xdd, 0x39, 0xfd, 0xe3, 0x3b, - 0xb5, 0xd0, 0xf7, 0x59, 0x30, 0xea, 0x34, 0x9b, 0x7e, 0xdd, 0xe1, 0xb1, 0x77, 0x0b, 0xdd, 0x0f, - 0x4e, 0xd1, 0xfe, 0x62, 0x5c, 0x83, 0x77, 0xe1, 0x05, 0xb9, 0x42, 0x35, 0x48, 0xcf, 0x5e, 0xe8, - 0x0d, 0xa3, 0x0f, 0xc9, 0xa7, 0x62, 0xd1, 0x18, 0x4a, 0xf5, 0x54, 0x2c, 0xb1, 0x3b, 0x42, 0x7f, - 0x25, 0xde, 0x36, 0x5e, 0x89, 0x03, 0xf9, 0xfe, 0x7b, 0x06, 0xfb, 0xda, 0xeb, 0x81, 0x88, 0xaa, - 0xba, 0x2f, 0xff, 0x60, 0xbe, 0xb3, 0x9c, 0xf6, 0x4e, 0xea, 0xe1, 0xc7, 0xff, 0x39, 0x98, 
0x6c, - 0x98, 0x4c, 0x80, 0x58, 0x89, 0x4f, 0xe5, 0xd1, 0x4d, 0xf0, 0x0c, 0xf1, 0xb5, 0x9f, 0x00, 0xe0, - 0x24, 0x61, 0x54, 0xe5, 0xa1, 0x1d, 0x2a, 0xde, 0x96, 0x2f, 0x3c, 0x2f, 0xec, 0xdc, 0xb9, 0xdc, - 0x0f, 0x23, 0xd2, 0xa2, 0x98, 0xf1, 0xed, 0xbe, 0x2e, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x43, - 0xcc, 0x5b, 0x2a, 0x9c, 0x1d, 0xc9, 0x97, 0x38, 0x9b, 0xb1, 0xca, 0xe2, 0x0d, 0xc9, 0xfe, 0x86, - 0x58, 0x50, 0x40, 0xd7, 0xa5, 0x2f, 0x62, 0x58, 0xf1, 0x6e, 0x87, 0x84, 0xf9, 0x22, 0x96, 0x96, - 0xde, 0x1f, 0xbb, 0x19, 0xf2, 0xf2, 0xcc, 0x84, 0x5e, 0x46, 0x4d, 0xca, 0x45, 0x89, 0xff, 0x32, - 0x4f, 0xd8, 0x2c, 0xe4, 0x77, 0xcf, 0xcc, 0x25, 0x16, 0x0f, 0xe7, 0x1d, 0x93, 0x04, 0x4e, 0xd2, - 0xa4, 0x1c, 0x29, 0xdf, 0xf5, 0xc2, 0x77, 0xa3, 0xd7, 0xd9, 0xc1, 0x1f, 0xe2, 0xec, 0x36, 0xe2, - 0x25, 0x58, 0xd4, 0x3f, 0x51, 0xf6, 0x60, 0xce, 0x83, 0xa9, 0xe4, 0x16, 0x7d, 0xa4, 0xec, 0xc8, - 0x1f, 0x0e, 0xc0, 0x84, 0xb9, 0xa4, 0xd0, 0x15, 0x28, 0x09, 0x22, 0x2a, 0xb6, 0xbf, 0xda, 0x25, - 0x6b, 0x12, 0x80, 0x63, 0x1c, 0x96, 0xd2, 0x81, 0x55, 0xd7, 0x8c, 0x75, 0xe3, 0x94, 0x0e, 0x0a, - 0x82, 0x35, 0x2c, 0xfa, 0xb0, 0xda, 0xf4, 0xfd, 0x48, 0x5d, 0x48, 0x6a, 0xdd, 0x2d, 0xb1, 0x52, - 0x2c, 0xa0, 0xf4, 0x22, 0xda, 0x25, 0x81, 0x47, 0x9a, 0x66, 0x14, 0x60, 0x75, 0x11, 0xdd, 0xd0, - 0x81, 0xd8, 0xc4, 0xa5, 0xd7, 0xa9, 0x1f, 0xb2, 0x85, 0x2c, 0x9e, 0x6f, 0xb1, 0xf1, 0x73, 0x8d, - 0xbb, 0x43, 0x4b, 0x38, 0xfa, 0x24, 0x3c, 0xa6, 0x22, 0x1d, 0x61, 0xae, 0xcd, 0x90, 0x2d, 0x0e, - 0x19, 0xd2, 0x96, 0xc7, 0x96, 0xb3, 0xd1, 0x70, 0x5e, 0x7d, 0xf4, 0x1a, 0x4c, 0x08, 0x16, 0x5f, - 0x52, 0x1c, 0x36, 0x0d, 0x6c, 0x6e, 0x18, 0x50, 0x9c, 0xc0, 0x96, 0x71, 0x8c, 0x19, 0x97, 0x2d, - 0x29, 0x8c, 0xa4, 0xe3, 0x18, 0xeb, 0x70, 0x9c, 0xaa, 0x81, 0x16, 0x61, 0x92, 0xf3, 0x60, 0xae, - 0xb7, 0xcd, 0xe7, 0x44, 0xb8, 0x56, 0xa9, 0x2d, 0x75, 0xcb, 0x04, 0xe3, 0x24, 0x3e, 0x7a, 0x19, - 0xc6, 0x9c, 0xa0, 0xbe, 0xe3, 0x46, 0xa4, 0x1e, 0x75, 0x02, 0xee, 0x73, 0xa5, 0x59, 0x28, 0x2d, - 0x6a, 0x30, 0x6c, 0x60, 0xda, 0x6f, 0xc3, 0x4c, 0x46, 0x9c, 0x04, 0xba, 0x70, 0x9c, 0xb6, 0x2b, - 0xbf, 0x29, 0x61, 0xc6, 0xbc, 0x58, 0xad, 0xc8, 0xaf, 0xd1, 0xb0, 0xe8, 0xea, 0x64, 0xf1, 0x14, - 0xb4, 0xb4, 0x80, 0x6a, 0x75, 0xae, 0x4a, 0x00, 0x8e, 0x71, 0xec, 0xff, 0x56, 0x80, 0xc9, 0x0c, - 0xdd, 0x0a, 0x4b, 0x4d, 0x97, 0x78, 0xa4, 0xc4, 0x99, 0xe8, 0xcc, 0xb0, 0xd8, 0x85, 0x23, 0x84, - 0xc5, 0x2e, 0xf6, 0x0a, 0x8b, 0x3d, 0xf0, 0x4e, 0xc2, 0x62, 0x9b, 0x23, 0x36, 0xd8, 0xd7, 0x88, - 0x65, 0x84, 0xd2, 0x1e, 0x3a, 0x62, 0x28, 0x6d, 0x63, 0xd0, 0x87, 0xfb, 0x18, 0xf4, 0x1f, 0x2d, - 0xc0, 0x54, 0xd2, 0x92, 0xf2, 0x04, 0xe4, 0xb6, 0xaf, 0x1b, 0x72, 0xdb, 0xcb, 0xfd, 0xb8, 0xc2, - 0xe6, 0xca, 0x70, 0x71, 0x42, 0x86, 0xfb, 0xc1, 0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xf5, 0x02, - 0x9c, 0xce, 0xf4, 0xc5, 0x3d, 0x81, 0xb1, 0xb9, 0x65, 0x8c, 0xcd, 0x73, 0x7d, 0xbb, 0x09, 0xe7, - 0x0e, 0xd0, 0xdd, 0xc4, 0x00, 0x5d, 0xe9, 0x9f, 0x64, 0xf7, 0x51, 0xfa, 0x5a, 0x11, 0xce, 0x67, - 0xd6, 0x8b, 0xc5, 0x9e, 0xab, 0x86, 0xd8, 0xf3, 0xf9, 0x84, 0xd8, 0xd3, 0xee, 0x5e, 0xfb, 0x78, - 0xe4, 0xa0, 0xc2, 0x5d, 0x96, 0x39, 0xfd, 0x3f, 0xa4, 0x0c, 0xd4, 0x70, 0x97, 0x55, 0x84, 0xb0, - 0x49, 0xf7, 0x5b, 0x49, 0xf6, 0xf9, 0xaf, 0x2d, 0x38, 0x9b, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, - 0x37, 0x65, 0x5d, 0x4f, 0xf7, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0x5f, 0x19, 0xcc, 0xf9, 0x16, 0xf6, - 0x92, 0xbf, 0x05, 0xa3, 0x4e, 0xbd, 0x4e, 0xc2, 0x70, 0xcd, 0x6f, 0xa8, 0xc8, 0xbf, 0xcf, 0xb1, - 0x77, 0x56, 0x5c, 0x7c, 0x78, 0x30, 0x3f, 0x97, 0x24, 0x11, 0x83, 0xb1, 0x4e, 0x01, 0x7d, 0x1a, - 0x46, 0x42, 0x71, 
0x6f, 0x8a, 0xb9, 0x7f, 0xa1, 0xcf, 0xc1, 0x71, 0x36, 0x49, 0xd3, 0x0c, 0x4d, - 0xa4, 0x24, 0x15, 0x8a, 0xa4, 0x19, 0xc6, 0xa4, 0x70, 0xac, 0x61, 0x4c, 0x9e, 0x07, 0xd8, 0x53, - 0x8f, 0x81, 0xa4, 0xfc, 0x41, 0x7b, 0x26, 0x68, 0x58, 0xe8, 0xe3, 0x30, 0x15, 0xf2, 0xd8, 0x7d, - 0xcb, 0x4d, 0x27, 0x64, 0xce, 0x32, 0x62, 0x15, 0xb2, 0xf0, 0x47, 0xb5, 0x04, 0x0c, 0xa7, 0xb0, - 0xd1, 0xaa, 0x6c, 0x95, 0x05, 0x1a, 0xe4, 0x0b, 0xf3, 0x52, 0xdc, 0xa2, 0x48, 0x8c, 0x7b, 0x2a, - 0x39, 0xfc, 0x6c, 0xe0, 0xb5, 0x9a, 0xe8, 0xd3, 0x00, 0x74, 0xf9, 0x08, 0x39, 0xc4, 0x70, 0xfe, - 0xe1, 0x49, 0x4f, 0x95, 0x46, 0xa6, 0x6d, 0x2f, 0xf3, 0x70, 0x2d, 0x2b, 0x22, 0x58, 0x23, 0x88, - 0xb6, 0x60, 0x3c, 0xfe, 0x17, 0xe7, 0x8d, 0x3c, 0x62, 0x0b, 0x4c, 0xee, 0x5d, 0xd6, 0xe9, 0x60, - 0x93, 0xac, 0xfd, 0xa3, 0x03, 0xf0, 0x78, 0x97, 0xb3, 0x18, 0x2d, 0x9a, 0xfa, 0xde, 0x67, 0x92, - 0x8f, 0xf8, 0xb9, 0xcc, 0xca, 0xc6, 0xab, 0x3e, 0xb1, 0xe4, 0x0b, 0xef, 0x78, 0xc9, 0xff, 0x90, - 0xa5, 0x89, 0x57, 0xb8, 0x65, 0xe9, 0xc7, 0x8e, 0x78, 0xc7, 0x1c, 0xa3, 0xbc, 0x65, 0x2b, 0x43, - 0x68, 0xf1, 0x7c, 0xdf, 0xdd, 0xe9, 0x5b, 0x8a, 0x71, 0xb2, 0xd2, 0xe8, 0xdf, 0xb6, 0xe0, 0x5c, - 0xd7, 0xe0, 0x20, 0xdf, 0x84, 0x8c, 0x89, 0xfd, 0x05, 0x0b, 0x9e, 0xcc, 0xac, 0x61, 0x98, 0x33, - 0x5d, 0x81, 0x52, 0x9d, 0x16, 0x6a, 0xde, 0xa0, 0xb1, 0x9b, 0xbc, 0x04, 0xe0, 0x18, 0xc7, 0xb0, - 0x5a, 0x2a, 0xf4, 0xb4, 0x5a, 0xfa, 0x35, 0x0b, 0x52, 0x87, 0xcb, 0x09, 0xdc, 0x72, 0x15, 0xf3, - 0x96, 0x7b, 0x7f, 0x3f, 0xa3, 0x99, 0x73, 0xc1, 0xfd, 0xc9, 0x24, 0x9c, 0xc9, 0xf1, 0x86, 0xda, - 0x83, 0xe9, 0xed, 0x3a, 0x31, 0xfd, 0x6c, 0xbb, 0xc5, 0x9f, 0xe9, 0xea, 0x94, 0xcb, 0x52, 0x84, - 0x4e, 0xa7, 0x50, 0x70, 0xba, 0x09, 0xf4, 0x05, 0x0b, 0x4e, 0x39, 0xf7, 0xc2, 0x15, 0xca, 0xad, - 0xb8, 0xf5, 0xa5, 0xa6, 0x5f, 0xdf, 0xa5, 0x57, 0x81, 0xdc, 0x08, 0x2f, 0x66, 0x4a, 0x90, 0xee, - 0xd6, 0x52, 0xf8, 0x46, 0xf3, 0x2c, 0x67, 0x6a, 0x16, 0x16, 0xce, 0x6c, 0x0b, 0x61, 0x11, 0x48, - 0x9f, 0xbe, 0x85, 0xba, 0x78, 0x82, 0x67, 0xb9, 0xad, 0xf1, 0xeb, 0x57, 0x42, 0xb0, 0xa2, 0x83, - 0x3e, 0x0b, 0xa5, 0x6d, 0xe9, 0x4b, 0x9a, 0x71, 0xbd, 0xc7, 0x03, 0xd9, 0xdd, 0xc3, 0x96, 0xab, - 0x81, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x85, 0xdd, 0xd2, 0x8e, 0x26, 0xec, - 0xfd, 0x78, 0xbc, 0x85, 0xf5, 0xd5, 0x1a, 0xa6, 0x15, 0xd1, 0x75, 0x28, 0x06, 0x9b, 0x0d, 0x21, - 0xfe, 0xcc, 0xdc, 0xa4, 0x78, 0xa9, 0x9c, 0xd3, 0x2b, 0x46, 0x09, 0x2f, 0x95, 0x31, 0x25, 0x81, - 0xaa, 0x30, 0xc8, 0x5c, 0x88, 0xc4, 0x65, 0x9a, 0xf9, 0x6c, 0xe8, 0xe2, 0x8a, 0xc7, 0x83, 0x32, - 0x30, 0x04, 0xcc, 0x09, 0xa1, 0x0d, 0x18, 0xaa, 0xb3, 0x14, 0x95, 0xe2, 0xf6, 0xfc, 0x50, 0xa6, - 0xa0, 0xb3, 0x4b, 0xee, 0x4e, 0x21, 0xf7, 0x63, 0x18, 0x58, 0xd0, 0x62, 0x54, 0x49, 0x7b, 0x67, - 0x2b, 0x14, 0x29, 0x95, 0xb3, 0xa9, 0x76, 0x49, 0x49, 0x2b, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, - 0x57, 0xa0, 0xb0, 0x55, 0x17, 0xee, 0x41, 0x99, 0x12, 0x4f, 0x33, 0x64, 0xc6, 0xd2, 0xd0, 0x83, - 0x83, 0xf9, 0xc2, 0xea, 0x32, 0x2e, 0x6c, 0xd5, 0xd1, 0x3a, 0x0c, 0x6f, 0x71, 0x27, 0x7b, 0x21, - 0xd4, 0x7c, 0x2a, 0xdb, 0xff, 0x3f, 0xe5, 0x87, 0xcf, 0x3d, 0x63, 0x04, 0x00, 0x4b, 0x22, 0x2c, - 0x2e, 0xbd, 0x0a, 0x16, 0x20, 0x62, 0x95, 0x2d, 0x1c, 0x2d, 0xc0, 0x03, 0x67, 0x6e, 0xe2, 0x90, - 0x03, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xf3, 0xda, 0x8b, 0xa0, 0x36, 0x99, 0xab, 0xba, 0x47, - 0xca, 0x7f, 0xbe, 0xaa, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0x5d, 0x18, 0xdf, 0x0b, 0xdb, 0x3b, 0x44, - 0x6e, 0x69, 0x16, 0xe3, 0x26, 0xe7, 0x5e, 0xbe, 0x23, 0x10, 0xdd, 0x20, 0xea, 0x38, 0xcd, 0xd4, - 0x29, 0xc4, 0x78, 0xa8, 0x3b, 0x3a, 0x31, 
0x6c, 0xd2, 0xa6, 0xc3, 0xff, 0x56, 0xc7, 0xdf, 0xdc, - 0x8f, 0x88, 0x08, 0x31, 0x96, 0x39, 0xfc, 0x6f, 0x70, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, - 0xd0, 0x1d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xca, 0x8f, 0x03, 0xba, 0x28, 0x91, 0x72, 0x06, 0x85, - 0x9d, 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x77, 0xfc, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x74, 0xfe, - 0x29, 0x59, 0xcd, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0xd4, 0x80, 0x89, 0xb6, - 0x1f, 0x44, 0xf7, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x45, 0x28, 0x63, 0x60, 0x8a, 0x16, 0x59, 0xf4, - 0x3e, 0x13, 0x82, 0x13, 0x34, 0xd1, 0x27, 0x60, 0x38, 0xac, 0x3b, 0x4d, 0x52, 0xb9, 0x35, 0x3b, - 0x93, 0x7f, 0xfd, 0xd4, 0x38, 0x4a, 0xce, 0xea, 0xe2, 0x41, 0xec, 0x39, 0x0a, 0x96, 0xe4, 0xd0, - 0x2a, 0x0c, 0xb2, 0xbc, 0x63, 0x2c, 0x1e, 0x5e, 0x4e, 0x38, 0xd3, 0x94, 0xf5, 0x35, 0x3f, 0x9b, - 0x58, 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x6f, 0x13, 0x3f, 0x9c, 0x3d, 0x9d, 0xbf, 0x07, 0xc4, - 0x93, 0xe6, 0x56, 0xad, 0xdb, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x9e, - 0xe9, 0x62, 0x36, 0x94, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xfb, 0xc3, - 0x69, 0x9e, 0x85, 0xbd, 0x66, 0xbf, 0xc7, 0x4a, 0x29, 0x3a, 0x3f, 0xdc, 0xaf, 0x70, 0xed, 0x18, - 0x59, 0xf0, 0x2f, 0x58, 0x70, 0xa6, 0x9d, 0xf9, 0x21, 0x82, 0x01, 0xe8, 0x4f, 0x46, 0xc7, 0x3f, - 0x5d, 0xc5, 0x4e, 0xcc, 0x86, 0xe3, 0x9c, 0x96, 0x92, 0xcf, 0x9c, 0xe2, 0x3b, 0x7e, 0xe6, 0xac, - 0xc1, 0x08, 0x63, 0x32, 0x7b, 0xa4, 0x6c, 0x4e, 0xbe, 0xf9, 0x18, 0x2b, 0xb1, 0x2c, 0x2a, 0x62, - 0x45, 0x02, 0xfd, 0xb0, 0x05, 0xe7, 0x92, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0xc0, 0x45, 0xfe, 0x90, - 0x5e, 0x15, 0xdf, 0x9f, 0xe2, 0xff, 0x0d, 0xe4, 0xc3, 0x5e, 0x08, 0xb8, 0x7b, 0x63, 0xa8, 0x9c, - 0xf1, 0x92, 0x1f, 0x32, 0xb5, 0x17, 0x7d, 0xbc, 0xe6, 0x5f, 0x84, 0xb1, 0x96, 0xdf, 0xf1, 0x22, - 0x61, 0x65, 0x24, 0x2c, 0x1e, 0x98, 0xa6, 0x7f, 0x4d, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x06, 0x30, - 0xf2, 0xd0, 0x32, 0x80, 0x37, 0x61, 0xcc, 0xd3, 0xcc, 0x62, 0x05, 0x3f, 0x70, 0x29, 0x3f, 0x58, - 0xaa, 0x6e, 0x44, 0xcb, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda, 0xc9, 0x3e, 0xf8, 0xbe, 0x6c, 0x65, - 0x30, 0xf5, 0x5c, 0x04, 0xf0, 0x51, 0x53, 0x04, 0x70, 0x29, 0x29, 0x02, 0x48, 0x49, 0xae, 0x8d, - 0xd7, 0x7f, 0xff, 0xb9, 0x60, 0xfa, 0x0d, 0xb8, 0x68, 0x37, 0xe1, 0x42, 0xaf, 0x6b, 0x89, 0x99, - 0x9b, 0x35, 0x94, 0x9e, 0x32, 0x36, 0x37, 0x6b, 0x54, 0xca, 0x98, 0x41, 0xfa, 0x0d, 0xe5, 0x63, - 0xff, 0x17, 0x0b, 0x8a, 0x55, 0xbf, 0x71, 0x02, 0x0f, 0xde, 0x8f, 0x19, 0x0f, 0xde, 0xc7, 0xb3, - 0x2f, 0xc4, 0x46, 0xae, 0xdc, 0x7d, 0x25, 0x21, 0x77, 0x3f, 0x97, 0x47, 0xa0, 0xbb, 0x94, 0xfd, - 0xa7, 0x8b, 0x30, 0x5a, 0xf5, 0x1b, 0xca, 0xd6, 0xfb, 0x9f, 0x3d, 0x8c, 0xad, 0x77, 0x6e, 0x46, - 0x03, 0x8d, 0x32, 0xb3, 0x52, 0x93, 0x6e, 0xae, 0xdf, 0x64, 0x26, 0xdf, 0x77, 0x89, 0xbb, 0xbd, - 0x13, 0x91, 0x46, 0xf2, 0x73, 0x4e, 0xce, 0xe4, 0xfb, 0xf7, 0x0b, 0x30, 0x99, 0x68, 0x1d, 0x35, - 0x61, 0xbc, 0xa9, 0x4b, 0x75, 0xc5, 0x3a, 0x7d, 0x28, 0x81, 0xb0, 0x30, 0x99, 0xd5, 0x8a, 0xb0, - 0x49, 0x1c, 0x2d, 0x00, 0x28, 0x35, 0xa7, 0x14, 0xeb, 0x31, 0xae, 0x5f, 0xe9, 0x41, 0x43, 0xac, - 0x61, 0xa0, 0x97, 0x60, 0x34, 0xf2, 0xdb, 0x7e, 0xd3, 0xdf, 0xde, 0xbf, 0x41, 0x64, 0xf0, 0x28, - 0x65, 0x08, 0xb7, 0x11, 0x83, 0xb0, 0x8e, 0x87, 0xee, 0xc3, 0xb4, 0x22, 0x52, 0x3b, 0x06, 0x49, - 0x37, 0x93, 0x2a, 0xac, 0x27, 0x29, 0xe2, 0x74, 0x23, 0xf6, 0xcf, 0x16, 0xf9, 0x10, 0x7b, 0x91, - 0xfb, 0xde, 0x6e, 0x78, 0x77, 0xef, 0x86, 0xaf, 0x59, 0x30, 0x45, 0x5b, 0x67, 0x56, 0x3e, 0xf2, - 0x9a, 0x57, 0xe1, 0x99, 0xad, 0x2e, 0xe1, 0x99, 0x2f, 0xd1, 0x53, 
0xb3, 0xe1, 0x77, 0x22, 0x21, - 0xbb, 0xd3, 0x8e, 0x45, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20, 0x10, 0x9e, 0x89, 0x3a, 0x1e, - 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x7a, 0xf3, 0x40, 0x76, 0xf4, 0x66, 0x1e, 0x84, 0x53, 0xd8, 0x83, - 0x08, 0x86, 0x4b, 0x0b, 0xc2, 0x29, 0x0d, 0x45, 0x62, 0x1c, 0xfb, 0xe7, 0x8b, 0x30, 0x56, 0xf5, - 0x1b, 0xb1, 0x8a, 0xf3, 0x45, 0x43, 0xc5, 0x79, 0x21, 0xa1, 0xe2, 0x9c, 0xd2, 0x71, 0xdf, 0x53, - 0x68, 0x7e, 0xa3, 0x14, 0x9a, 0xff, 0xd8, 0x62, 0xb3, 0x56, 0x5e, 0xaf, 0x71, 0xa3, 0x31, 0x74, - 0x15, 0x46, 0xd9, 0x01, 0xc3, 0x5c, 0x61, 0xa5, 0xde, 0x8f, 0x65, 0x25, 0x5a, 0x8f, 0x8b, 0xb1, - 0x8e, 0x83, 0x2e, 0xc3, 0x48, 0x48, 0x9c, 0xa0, 0xbe, 0xa3, 0x4e, 0x57, 0xa1, 0xa4, 0xe3, 0x65, - 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xfc, 0xc7, 0x62, 0xbe, 0x6b, 0x9d, 0xde, 0x1f, 0xbe, 0x45, 0xf2, - 0x83, 0x3e, 0xda, 0x77, 0x01, 0xa5, 0xf1, 0xfb, 0x08, 0x7c, 0x36, 0x6f, 0x06, 0x3e, 0x2b, 0xa5, - 0x82, 0x9e, 0xfd, 0xb9, 0x05, 0x13, 0x55, 0xbf, 0x41, 0xb7, 0xee, 0xb7, 0xd2, 0x3e, 0xd5, 0x83, - 0xdf, 0x0e, 0x75, 0x09, 0x7e, 0x7b, 0x11, 0x06, 0xab, 0x7e, 0xa3, 0x52, 0xed, 0xe6, 0xd7, 0x6e, - 0xff, 0x0d, 0x0b, 0x86, 0xab, 0x7e, 0xe3, 0x04, 0xd4, 0x02, 0x1f, 0x35, 0xd5, 0x02, 0x8f, 0xe5, - 0xac, 0x9b, 0x1c, 0x4d, 0xc0, 0x5f, 0x1b, 0x80, 0x71, 0xda, 0x4f, 0x7f, 0x5b, 0x4e, 0xa5, 0x31, - 0x6c, 0x56, 0x1f, 0xc3, 0x46, 0xb9, 0x70, 0xbf, 0xd9, 0xf4, 0xef, 0x25, 0xa7, 0x75, 0x95, 0x95, - 0x62, 0x01, 0x45, 0xcf, 0xc2, 0x48, 0x3b, 0x20, 0x7b, 0xae, 0x2f, 0xd8, 0x5b, 0x4d, 0xc9, 0x52, - 0x15, 0xe5, 0x58, 0x61, 0xd0, 0x67, 0x61, 0xe8, 0x7a, 0xf4, 0x2a, 0xaf, 0xfb, 0x5e, 0x83, 0x4b, - 0xce, 0x8b, 0x22, 0x43, 0x83, 0x56, 0x8e, 0x0d, 0x2c, 0x74, 0x17, 0x4a, 0xec, 0x3f, 0x3b, 0x76, - 0x8e, 0x9e, 0xeb, 0x53, 0xe4, 0x7e, 0x13, 0x04, 0x70, 0x4c, 0x0b, 0x3d, 0x0f, 0x10, 0xc9, 0x28, - 0xe7, 0xa1, 0x08, 0x72, 0xa5, 0x9e, 0x02, 0x2a, 0xfe, 0x79, 0x88, 0x35, 0x2c, 0xf4, 0x0c, 0x94, - 0x22, 0xc7, 0x6d, 0xde, 0x74, 0x3d, 0x12, 0x32, 0x89, 0x78, 0x51, 0xa6, 0x60, 0x13, 0x85, 0x38, - 0x86, 0x53, 0x56, 0x8c, 0x45, 0x80, 0xe0, 0x99, 0x82, 0x47, 0x18, 0x36, 0x63, 0xc5, 0x6e, 0xaa, - 0x52, 0xac, 0x61, 0xa0, 0x1d, 0x78, 0xc2, 0xf5, 0x58, 0x36, 0x03, 0x52, 0xdb, 0x75, 0xdb, 0x1b, - 0x37, 0x6b, 0x77, 0x48, 0xe0, 0x6e, 0xed, 0x2f, 0x39, 0xf5, 0x5d, 0xe2, 0xc9, 0x2c, 0x8e, 0xef, - 0x17, 0x5d, 0x7c, 0xa2, 0xd2, 0x05, 0x17, 0x77, 0xa5, 0x64, 0xbf, 0x0c, 0xa7, 0xab, 0x7e, 0xa3, - 0xea, 0x07, 0xd1, 0xaa, 0x1f, 0xdc, 0x73, 0x82, 0x86, 0x5c, 0x29, 0xf3, 0x32, 0x1a, 0x03, 0x3d, - 0x0a, 0x07, 0xf9, 0x41, 0x61, 0x44, 0x5a, 0x78, 0x81, 0x31, 0x5f, 0x47, 0xf4, 0x21, 0xaa, 0x33, - 0x36, 0x40, 0xa5, 0xf6, 0xb8, 0xe6, 0x44, 0x04, 0xdd, 0x62, 0x29, 0x8b, 0xe3, 0x1b, 0x51, 0x54, - 0x7f, 0x5a, 0x4b, 0x59, 0x1c, 0x03, 0x33, 0xaf, 0x50, 0xb3, 0xbe, 0xfd, 0x5f, 0x07, 0xd9, 0xe1, - 0x98, 0x48, 0x0f, 0x81, 0x3e, 0x03, 0x13, 0x21, 0xb9, 0xe9, 0x7a, 0x9d, 0xfb, 0x52, 0x1a, 0xd1, - 0xc5, 0x0b, 0xac, 0xb6, 0xa2, 0x63, 0x72, 0x99, 0xa6, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x0b, 0x26, - 0xee, 0xb9, 0x5e, 0xc3, 0xbf, 0x17, 0x4a, 0xfa, 0x23, 0xf9, 0xa2, 0xcd, 0xbb, 0x1c, 0x33, 0xd1, - 0x47, 0xa3, 0xb9, 0xbb, 0x06, 0x31, 0x9c, 0x20, 0x4e, 0x17, 0x60, 0xd0, 0xf1, 0x16, 0xc3, 0xdb, - 0x21, 0x09, 0x44, 0xf2, 0x69, 0xb6, 0x00, 0xb1, 0x2c, 0xc4, 0x31, 0x9c, 0x2e, 0x40, 0xf6, 0xe7, - 0x5a, 0xe0, 0x77, 0x78, 0x2e, 0x02, 0xb1, 0x00, 0xb1, 0x2a, 0xc5, 0x1a, 0x06, 0xdd, 0xa0, 0xec, - 0xdf, 0xba, 0xef, 0x61, 0xdf, 0x8f, 0xe4, 0x96, 0x66, 0xe9, 0x4e, 0xb5, 0x72, 0x6c, 0x60, 0xa1, - 0x55, 0x40, 0x61, 0xa7, 0xdd, 0x6e, 0x32, 0xf3, 0x12, 0xa7, 0xc9, 0x48, 0x71, 0x95, 0x7b, 
0x91, - 0x87, 0x68, 0xad, 0xa5, 0xa0, 0x38, 0xa3, 0x06, 0x3d, 0xab, 0xb7, 0x44, 0x57, 0x07, 0x59, 0x57, - 0xb9, 0x1a, 0xa4, 0xc6, 0xfb, 0x29, 0x61, 0x68, 0x05, 0x86, 0xc3, 0xfd, 0xb0, 0x1e, 0x89, 0x58, - 0x73, 0x39, 0x19, 0x80, 0x6a, 0x0c, 0x45, 0x4b, 0x40, 0xc7, 0xab, 0x60, 0x59, 0x17, 0xd5, 0x61, - 0x46, 0x50, 0x5c, 0xde, 0x71, 0x3c, 0x95, 0x4f, 0x85, 0x5b, 0xd9, 0x5e, 0x7d, 0x70, 0x30, 0x3f, - 0x23, 0x5a, 0xd6, 0xc1, 0x87, 0x07, 0xf3, 0x67, 0xaa, 0x7e, 0x23, 0x03, 0x82, 0xb3, 0xa8, 0xf1, - 0xc5, 0x57, 0xaf, 0xfb, 0xad, 0x76, 0x35, 0xf0, 0xb7, 0xdc, 0x26, 0xe9, 0xa6, 0x4a, 0xaa, 0x19, - 0x98, 0x62, 0xf1, 0x19, 0x65, 0x38, 0x41, 0xcd, 0xfe, 0x4e, 0xc6, 0xcf, 0xb0, 0x7c, 0xcb, 0x51, - 0x27, 0x20, 0xa8, 0x05, 0xe3, 0x6d, 0xb6, 0x4d, 0x44, 0x86, 0x00, 0xb1, 0xd6, 0x5f, 0xec, 0x53, - 0x24, 0x72, 0x8f, 0x5e, 0x03, 0xa6, 0x99, 0x4a, 0x55, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x4f, 0x3c, - 0xc6, 0x6e, 0xc4, 0x1a, 0x97, 0x73, 0x0c, 0x0b, 0xa3, 0x7e, 0xf1, 0xb4, 0x9a, 0xcb, 0x17, 0xb8, - 0xc5, 0xd3, 0x22, 0x1c, 0x03, 0xb0, 0xac, 0x8b, 0x3e, 0x0d, 0x13, 0xf4, 0xa5, 0xa2, 0x6e, 0xa5, - 0x70, 0xf6, 0x54, 0x7e, 0xf0, 0x05, 0x85, 0xa5, 0x67, 0x0f, 0xd1, 0x2b, 0xe3, 0x04, 0x31, 0xf4, - 0x06, 0x33, 0x0b, 0x91, 0xa4, 0x0b, 0xfd, 0x90, 0xd6, 0x2d, 0x40, 0x24, 0x59, 0x8d, 0x08, 0xea, - 0xc0, 0x4c, 0x3a, 0xd7, 0x58, 0x38, 0x6b, 0xe7, 0xb3, 0x7c, 0xe9, 0x74, 0x61, 0x71, 0x9a, 0x87, - 0x34, 0x2c, 0xc4, 0x59, 0xf4, 0xd1, 0x4d, 0x18, 0x17, 0x49, 0x87, 0xc5, 0xca, 0x2d, 0x1a, 0x72, - 0xc0, 0x71, 0xac, 0x03, 0x0f, 0x93, 0x05, 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0x9c, 0x96, 0x04, 0xe8, - 0x5a, 0xe0, 0x30, 0x65, 0xbe, 0xcb, 0x8e, 0x53, 0xed, 0xae, 0x7e, 0xf2, 0xc1, 0xc1, 0xfc, 0xb9, - 0x8d, 0x6e, 0x88, 0xb8, 0x3b, 0x1d, 0x74, 0x0b, 0x4e, 0x73, 0xd7, 0xe1, 0x32, 0x71, 0x1a, 0x4d, - 0xd7, 0x53, 0xcc, 0x00, 0xdf, 0xf2, 0x67, 0x1f, 0x1c, 0xcc, 0x9f, 0x5e, 0xcc, 0x42, 0xc0, 0xd9, - 0xf5, 0xd0, 0x47, 0xa1, 0xd4, 0xf0, 0x42, 0x31, 0x06, 0x43, 0x46, 0x9e, 0xa5, 0x52, 0x79, 0xbd, - 0xa6, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x9b, 0xcb, 0x8a, 0x95, 0x04, 0x63, 0x38, 0x15, - 0x3a, 0x29, 0x29, 0xe4, 0x33, 0x9c, 0x07, 0xb9, 0x92, 0x44, 0xd9, 0xd4, 0x1b, 0x7e, 0x85, 0x06, - 0x61, 0xf4, 0x3a, 0x20, 0xfa, 0x82, 0x70, 0xeb, 0x64, 0xb1, 0xce, 0xd2, 0x4f, 0x30, 0xd1, 0xfa, - 0x88, 0xe9, 0xce, 0x56, 0x4b, 0x61, 0xe0, 0x8c, 0x5a, 0xe8, 0x3a, 0x3d, 0x55, 0xf4, 0x52, 0x71, - 0x6a, 0xa9, 0xac, 0x78, 0x65, 0xd2, 0x0e, 0x48, 0xdd, 0x89, 0x48, 0xc3, 0xa4, 0x88, 0x13, 0xf5, - 0x50, 0x03, 0x9e, 0x70, 0x3a, 0x91, 0xcf, 0xc4, 0xf0, 0x26, 0xea, 0x86, 0xbf, 0x4b, 0x3c, 0xa6, - 0x01, 0x1b, 0x59, 0xba, 0x40, 0xb9, 0x8d, 0xc5, 0x2e, 0x78, 0xb8, 0x2b, 0x15, 0xca, 0x25, 0xaa, - 0x34, 0xb8, 0x60, 0x06, 0x84, 0xca, 0x48, 0x85, 0xfb, 0x12, 0x8c, 0xee, 0xf8, 0x61, 0xb4, 0x4e, - 0xa2, 0x7b, 0x7e, 0xb0, 0x2b, 0xe2, 0x7a, 0xc6, 0xb1, 0xa0, 0x63, 0x10, 0xd6, 0xf1, 0xe8, 0x33, - 0x90, 0xd9, 0x67, 0x54, 0xca, 0x4c, 0x35, 0x3e, 0x12, 0x9f, 0x31, 0xd7, 0x79, 0x31, 0x96, 0x70, - 0x89, 0x5a, 0xa9, 0x2e, 0x33, 0x35, 0x77, 0x02, 0xb5, 0x52, 0x5d, 0xc6, 0x12, 0x4e, 0x97, 0x6b, - 0xb8, 0xe3, 0x04, 0xa4, 0x1a, 0xf8, 0x75, 0x12, 0x6a, 0x11, 0xc8, 0x1f, 0xe7, 0x51, 0x4b, 0xe9, - 0x72, 0xad, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0x88, 0xa4, 0x13, 0x60, 0x4d, 0xe4, 0xeb, 0x27, 0xd2, - 0xfc, 0x4c, 0x9f, 0x39, 0xb0, 0x3c, 0x98, 0x52, 0xa9, 0xb7, 0x78, 0x9c, 0xd2, 0x70, 0x76, 0x92, - 0xad, 0xed, 0xfe, 0x83, 0x9c, 0x2a, 0x8d, 0x4f, 0x25, 0x41, 0x09, 0xa7, 0x68, 0x1b, 0x31, 0xbf, - 0xa6, 0x7a, 0xc6, 0xfc, 0xba, 0x02, 0xa5, 0xb0, 0xb3, 0xd9, 0xf0, 0x5b, 0x8e, 0xeb, 0x31, 0x35, - 0xb7, 0xf6, 0x1e, 
0xa9, 0x49, 0x00, 0x8e, 0x71, 0xd0, 0x2a, 0x8c, 0x38, 0x52, 0x9d, 0x83, 0xf2, - 0xa3, 0xbc, 0x28, 0x25, 0x0e, 0x0f, 0x7c, 0x20, 0x15, 0x38, 0xaa, 0x2e, 0x7a, 0x15, 0xc6, 0x85, - 0xeb, 0xab, 0xc8, 0xfa, 0x38, 0x63, 0xfa, 0x27, 0xd5, 0x74, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18, - 0x8d, 0xfc, 0x26, 0x73, 0xb2, 0xa1, 0x6c, 0xde, 0x99, 0xfc, 0x78, 0x65, 0x1b, 0x0a, 0x4d, 0x97, - 0xa4, 0xaa, 0xaa, 0x58, 0xa7, 0x83, 0x36, 0xf8, 0x7a, 0x67, 0x91, 0xb8, 0x49, 0x38, 0xfb, 0x58, - 0xfe, 0x9d, 0xa4, 0x02, 0x76, 0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0xdb, - 0x81, 0xeb, 0xb3, 0x35, 0xa1, 0x34, 0x79, 0xb3, 0x66, 0x1a, 0xa0, 0x6a, 0x12, 0x01, 0xa7, 0xeb, - 0x30, 0xcf, 0x65, 0x51, 0x38, 0x7b, 0x96, 0x27, 0x86, 0xe6, 0xcf, 0x3b, 0x5e, 0x86, 0x15, 0x14, - 0xad, 0xb1, 0x93, 0x98, 0x4b, 0x26, 0x66, 0xe7, 0xf2, 0x03, 0xcb, 0xe8, 0x12, 0x0c, 0xce, 0xbc, - 0xaa, 0xbf, 0x38, 0xa6, 0x80, 0x1a, 0x5a, 0x06, 0x41, 0xfa, 0x62, 0x08, 0x67, 0x9f, 0xe8, 0x62, - 0x24, 0x97, 0x78, 0x5e, 0xc4, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x71, 0x98, 0x12, - 0xe1, 0xf0, 0xe2, 0x61, 0x3a, 0x17, 0x9b, 0x2e, 0xe3, 0x04, 0x0c, 0xa7, 0xb0, 0x79, 0x86, 0x02, - 0x67, 0xb3, 0x49, 0xc4, 0xd1, 0x77, 0xd3, 0xf5, 0x76, 0xc3, 0xd9, 0xf3, 0xec, 0x7c, 0x10, 0x19, - 0x0a, 0x92, 0x50, 0x9c, 0x51, 0x03, 0x6d, 0xc0, 0x54, 0x3b, 0x20, 0xa4, 0xc5, 0x18, 0x7d, 0x71, - 0x9f, 0xcd, 0x73, 0xc7, 0x7d, 0xda, 0x93, 0x6a, 0x02, 0x76, 0x98, 0x51, 0x86, 0x53, 0x14, 0xd0, - 0x3d, 0x18, 0xf1, 0xf7, 0x48, 0xb0, 0x43, 0x9c, 0xc6, 0xec, 0x85, 0x2e, 0xa6, 0xf4, 0xe2, 0x72, - 0xbb, 0x25, 0x70, 0x13, 0xda, 0x7f, 0x59, 0xdc, 0x5b, 0xfb, 0x2f, 0x1b, 0x43, 0x3f, 0x62, 0xc1, - 0x59, 0xa9, 0x30, 0xa8, 0xb5, 0xe9, 0xa8, 0x2f, 0xfb, 0x5e, 0x18, 0x05, 0xdc, 0xd5, 0xfc, 0xc9, - 0x7c, 0xf7, 0xeb, 0x8d, 0x9c, 0x4a, 0x4a, 0x38, 0x7a, 0x36, 0x0f, 0x23, 0xc4, 0xf9, 0x2d, 0xa2, - 0x65, 0x98, 0x0e, 0x49, 0x24, 0x0f, 0xa3, 0xc5, 0x70, 0xf5, 0x8d, 0xf2, 0xfa, 0xec, 0x45, 0xee, - 0x27, 0x4f, 0x37, 0x43, 0x2d, 0x09, 0xc4, 0x69, 0xfc, 0xb9, 0x6f, 0x87, 0xe9, 0xd4, 0xf5, 0x7f, - 0x94, 0xcc, 0x2b, 0x73, 0xbb, 0x30, 0x6e, 0x0c, 0xf1, 0x23, 0xd5, 0x1e, 0xff, 0xcb, 0x61, 0x28, - 0x29, 0xcd, 0x22, 0xba, 0x62, 0x2a, 0x8c, 0xcf, 0x26, 0x15, 0xc6, 0x23, 0xf4, 0x5d, 0xaf, 0xeb, - 0x88, 0x37, 0x32, 0xa2, 0x83, 0xe5, 0x6d, 0xe8, 0xfe, 0xdd, 0xbe, 0x35, 0x71, 0x6d, 0xb1, 0x6f, - 0xcd, 0xf3, 0x40, 0x57, 0x09, 0xf0, 0x35, 0x98, 0xf6, 0x7c, 0xc6, 0x73, 0x92, 0x86, 0x64, 0x28, - 0x18, 0xdf, 0x50, 0xd2, 0xc3, 0x6d, 0x24, 0x10, 0x70, 0xba, 0x0e, 0x6d, 0x90, 0x5f, 0xfc, 0x49, - 0x91, 0x33, 0xe7, 0x0b, 0xb0, 0x80, 0xa2, 0x8b, 0x30, 0xd8, 0xf6, 0x1b, 0x95, 0xaa, 0xe0, 0x37, - 0xb5, 0x98, 0x94, 0x8d, 0x4a, 0x15, 0x73, 0x18, 0x5a, 0x84, 0x21, 0xf6, 0x23, 0x9c, 0x1d, 0xcb, - 0x8f, 0xab, 0xc0, 0x6a, 0x68, 0x79, 0x6d, 0x58, 0x05, 0x2c, 0x2a, 0x32, 0xd1, 0x17, 0x65, 0xd2, - 0x99, 0xe8, 0x6b, 0xf8, 0x21, 0x45, 0x5f, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xfb, 0x70, 0xda, 0x78, - 0x18, 0xf1, 0x25, 0x42, 0x42, 0xe1, 0xdb, 0x7d, 0xb1, 0xeb, 0x8b, 0x48, 0x68, 0xaa, 0xcf, 0x89, - 0x4e, 0x9f, 0xae, 0x64, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0xd7, 0x53, 0xad, 0x8e, 0xf4, - 0xdf, 0xaa, 0x9a, 0xd0, 0x74, 0x8b, 0x69, 0xc2, 0xe8, 0x55, 0x18, 0x79, 0xcb, 0x0f, 0xd9, 0x59, - 0x2d, 0x78, 0x64, 0xe9, 0x18, 0x3c, 0xf2, 0xc6, 0xad, 0x1a, 0x2b, 0x3f, 0x3c, 0x98, 0x1f, 0xad, - 0xfa, 0x0d, 0xf9, 0x17, 0xab, 0x0a, 0xe8, 0xfb, 0x2d, 0x98, 0x4b, 0xbf, 0xbc, 0x54, 0xa7, 0xc7, - 0xfb, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x5b, 0xc9, 0x25, 0x87, 0xbb, 0x34, 0x65, 0xff, 0x8a, 0xc5, - 0xa4, 0x6e, 0x42, 0x03, 0x44, 0xc2, 0x4e, 
[generated protobuf diff elided: the remainder of the old gzipped FileDescriptorProto byte array is removed ("-" lines) and replaced by a regenerated byte array annotated "// 14240 bytes of a gzipped FileDescriptorProto" ("+" lines); the raw hex bytes of this generated fileDescriptor are not reproduced here]
0x42, + 0x7e, 0x98, 0x5c, 0xdd, 0x98, 0x99, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0xe3, 0x35, 0x03, 0xfb, + 0x92, 0x95, 0xc1, 0xd4, 0x73, 0x51, 0xcc, 0x87, 0x4d, 0x51, 0xcc, 0x85, 0xa4, 0x28, 0x26, 0xa5, + 0x41, 0x30, 0xa4, 0x30, 0xfd, 0x67, 0x01, 0xea, 0x37, 0xd4, 0xa6, 0xdd, 0x82, 0xf3, 0xbd, 0xae, + 0x25, 0x66, 0xf6, 0xd7, 0x54, 0xfa, 0xe2, 0xd8, 0xec, 0xaf, 0xb9, 0x5a, 0xc5, 0x0c, 0xd2, 0x6f, + 0x10, 0x27, 0xfb, 0xbf, 0x59, 0x50, 0xae, 0xf9, 0xcd, 0x63, 0x78, 0xf0, 0x7e, 0xc4, 0x78, 0xf0, + 0x3e, 0x9a, 0x7d, 0x21, 0x36, 0x73, 0xf5, 0x1f, 0xcb, 0x09, 0xfd, 0xc7, 0xd9, 0x3c, 0x02, 0xc5, + 0xda, 0x8e, 0x9f, 0x2a, 0xc3, 0x68, 0xcd, 0x6f, 0x2a, 0x9b, 0xfb, 0x7f, 0xf1, 0x20, 0x36, 0xf7, + 0xb9, 0xb9, 0x2c, 0x34, 0xca, 0xcc, 0x5a, 0x50, 0xba, 0x1b, 0x7f, 0x93, 0x99, 0xde, 0xdf, 0x26, + 0xee, 0xf6, 0x4e, 0x44, 0x9a, 0xc9, 0xcf, 0x39, 0x3e, 0xd3, 0xfb, 0xdf, 0x2b, 0xc1, 0x64, 0xa2, + 0x75, 0xd4, 0x82, 0xf1, 0x96, 0x2e, 0x5d, 0x17, 0xeb, 0xf4, 0x81, 0x04, 0xf3, 0xc2, 0x74, 0x59, + 0x2b, 0xc2, 0x26, 0x71, 0x34, 0x0f, 0xa0, 0xd4, 0xcd, 0x52, 0xbc, 0xca, 0xb8, 0x7e, 0xa5, 0x8f, + 0x0e, 0xb1, 0x86, 0x81, 0x5e, 0x84, 0xd1, 0xc8, 0xef, 0xf8, 0x2d, 0x7f, 0x7b, 0xff, 0x1a, 0x91, + 0xf1, 0xbd, 0x94, 0x41, 0xe2, 0x46, 0x0c, 0xc2, 0x3a, 0x1e, 0xba, 0x0b, 0xd3, 0x8a, 0x48, 0xfd, + 0x08, 0x34, 0x0e, 0x4c, 0xaa, 0xb0, 0x9e, 0xa4, 0x88, 0xd3, 0x8d, 0xd8, 0x3f, 0x53, 0xe6, 0x43, + 0xec, 0x45, 0xee, 0xbb, 0xbb, 0xe1, 0x9d, 0xbd, 0x1b, 0xbe, 0x66, 0xc1, 0x14, 0x6d, 0x9d, 0x59, + 0x5b, 0xc9, 0x6b, 0x5e, 0x05, 0xe6, 0xb6, 0x0a, 0x02, 0x73, 0x5f, 0xa0, 0xa7, 0x66, 0xd3, 0xef, + 0x46, 0x42, 0x76, 0xa7, 0x1d, 0x8b, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, 0x3c, 0x44, + 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xdc, 0xee, 0x81, 0xec, 0xb8, 0xdd, 0x3c, 0xfc, 0xaa, + 0xb0, 0xcb, 0x11, 0x0c, 0x97, 0x16, 0x7e, 0x55, 0x1a, 0xec, 0xc4, 0x38, 0xf6, 0x57, 0xca, 0x30, + 0x56, 0xf3, 0x9b, 0xb1, 0xaa, 0xf9, 0x05, 0x43, 0xd5, 0x7c, 0x3e, 0xa1, 0x6a, 0x9e, 0xd2, 0x71, + 0xdf, 0x55, 0x2c, 0x7f, 0xa3, 0x14, 0xcb, 0xff, 0xd4, 0x62, 0xb3, 0x56, 0x5d, 0xaf, 0x73, 0xe3, + 0x3d, 0x74, 0x19, 0x46, 0xd9, 0x01, 0xc3, 0x5c, 0x92, 0xa5, 0xfe, 0x95, 0xe5, 0xa3, 0x5a, 0x8f, + 0x8b, 0xb1, 0x8e, 0x83, 0x2e, 0xc2, 0x48, 0x48, 0x9c, 0xa0, 0xb1, 0xa3, 0x4e, 0x57, 0xa1, 0x2c, + 0xe5, 0x65, 0x58, 0x41, 0xd1, 0xeb, 0x71, 0xe4, 0xcf, 0x72, 0xbe, 0x8b, 0xa3, 0xde, 0x1f, 0xbe, + 0x45, 0xf2, 0xc3, 0x7d, 0xda, 0xb7, 0x01, 0xa5, 0xf1, 0xfb, 0x88, 0x4d, 0x37, 0x67, 0xc6, 0xa6, + 0xab, 0xa4, 0xe2, 0xd2, 0xfd, 0x99, 0x05, 0x13, 0x35, 0xbf, 0x49, 0xb7, 0xee, 0xb7, 0xd2, 0x3e, + 0xd5, 0xc3, 0x1e, 0x0f, 0x15, 0x84, 0x3d, 0x7e, 0x02, 0x06, 0x6b, 0x7e, 0x73, 0xb5, 0x56, 0x14, + 0x5f, 0xc0, 0xfe, 0x9b, 0x16, 0x0c, 0xd7, 0xfc, 0xe6, 0x31, 0xa8, 0x05, 0x3e, 0x6c, 0xaa, 0x05, + 0x1e, 0xc9, 0x59, 0x37, 0x39, 0x9a, 0x80, 0xbf, 0x3e, 0x00, 0xe3, 0xb4, 0x9f, 0xfe, 0xb6, 0x9c, + 0x4a, 0x63, 0xd8, 0xac, 0x3e, 0x86, 0x8d, 0x72, 0xe1, 0x7e, 0xab, 0xe5, 0xdf, 0x49, 0x4e, 0xeb, + 0x0a, 0x2b, 0xc5, 0x02, 0x8a, 0x9e, 0x81, 0x91, 0x4e, 0x40, 0xf6, 0x5c, 0x5f, 0xb0, 0xb7, 0x9a, + 0x92, 0xa5, 0x26, 0xca, 0xb1, 0xc2, 0xa0, 0xcf, 0xc2, 0xd0, 0xf5, 0xe8, 0x55, 0xde, 0xf0, 0xbd, + 0x26, 0x97, 0x9c, 0x97, 0x45, 0x6e, 0x0e, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x36, 0x54, 0xd8, 0x7f, + 0x76, 0xec, 0x1c, 0x3e, 0xcb, 0xab, 0xc8, 0xfa, 0x27, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x0e, 0x20, + 0x92, 0xf1, 0xed, 0x43, 0x11, 0x6d, 0x4d, 0x3d, 0x05, 0x54, 0xe4, 0xfb, 0x10, 0x6b, 0x58, 0xe8, + 0x69, 0xa8, 0x44, 0x8e, 0xdb, 0xba, 0xee, 0x7a, 0x24, 0x64, 0x12, 0xf1, 0xb2, 0x4c, 0xbe, 0x27, + 0x0a, 0x71, 0x0c, 
0xa7, 0xac, 0x18, 0x8b, 0xc4, 0xc1, 0x73, 0x44, 0x8f, 0x30, 0x6c, 0xc6, 0x8a, + 0x5d, 0x57, 0xa5, 0x58, 0xc3, 0x40, 0x3b, 0xf0, 0x98, 0xeb, 0xb1, 0x3c, 0x16, 0xa4, 0xbe, 0xeb, + 0x76, 0x36, 0xae, 0xd7, 0x6f, 0x91, 0xc0, 0xdd, 0xda, 0x5f, 0x74, 0x1a, 0xbb, 0xc4, 0x93, 0xf9, + 0x3b, 0xdf, 0x2b, 0xba, 0xf8, 0xd8, 0x6a, 0x01, 0x2e, 0x2e, 0xa4, 0x64, 0x3f, 0xcf, 0xd6, 0xfb, + 0x8d, 0x3a, 0x7a, 0xbf, 0x71, 0x74, 0x9c, 0xd6, 0x8f, 0x8e, 0xfb, 0x07, 0x73, 0x43, 0x37, 0xea, + 0x5a, 0x20, 0x89, 0x97, 0xe0, 0x54, 0xcd, 0x6f, 0xd6, 0xfc, 0x20, 0x5a, 0xf1, 0x83, 0x3b, 0x4e, + 0xd0, 0x94, 0xcb, 0x6b, 0x4e, 0x86, 0xd2, 0xa0, 0xe7, 0xe7, 0x20, 0x3f, 0x5d, 0x8c, 0x30, 0x19, + 0xcf, 0x33, 0x8e, 0xed, 0x90, 0x0e, 0x60, 0x0d, 0xc6, 0x3b, 0xa8, 0x4c, 0x30, 0x57, 0x9c, 0x88, + 0xa0, 0x1b, 0x2c, 0xc3, 0x75, 0x7c, 0x8d, 0x8a, 0xea, 0x4f, 0x69, 0x19, 0xae, 0x63, 0x60, 0xe6, + 0xbd, 0x6b, 0xd6, 0xb7, 0xff, 0xfb, 0x20, 0x3b, 0x51, 0x13, 0xd9, 0x44, 0xd0, 0xa7, 0x60, 0x22, + 0x24, 0xd7, 0x5d, 0xaf, 0x7b, 0x57, 0x8a, 0x30, 0x0a, 0x5c, 0xf8, 0xea, 0xcb, 0x3a, 0x26, 0x17, + 0x84, 0x9a, 0x65, 0x38, 0x41, 0x0d, 0xb5, 0x61, 0xe2, 0x8e, 0xeb, 0x35, 0xfd, 0x3b, 0xa1, 0xa4, + 0x3f, 0x92, 0x2f, 0x0f, 0xbd, 0xcd, 0x31, 0x13, 0x7d, 0x34, 0x9a, 0xbb, 0x6d, 0x10, 0xc3, 0x09, + 0xe2, 0x74, 0xd5, 0x06, 0x5d, 0x6f, 0x21, 0xbc, 0x19, 0x92, 0x40, 0xe4, 0x2a, 0x67, 0xab, 0x16, + 0xcb, 0x42, 0x1c, 0xc3, 0xe9, 0xaa, 0x65, 0x7f, 0xae, 0x04, 0x7e, 0x97, 0xa7, 0xae, 0x10, 0xab, + 0x16, 0xab, 0x52, 0xac, 0x61, 0xd0, 0x5d, 0xcd, 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x9e, + 0x03, 0x4c, 0xa7, 0xaf, 0x95, 0x63, 0x03, 0x0b, 0xad, 0x00, 0x0a, 0xbb, 0x9d, 0x4e, 0x8b, 0xd9, + 0x06, 0x39, 0x2d, 0x46, 0x8a, 0xdb, 0x4b, 0x94, 0x79, 0xe8, 0xdd, 0x7a, 0x0a, 0x8a, 0x33, 0x6a, + 0xd0, 0x03, 0x7e, 0x4b, 0x74, 0x75, 0x90, 0x75, 0x95, 0xeb, 0x4e, 0xea, 0xbc, 0x9f, 0x12, 0x86, + 0x96, 0x61, 0x38, 0xdc, 0x0f, 0x1b, 0x91, 0x88, 0x94, 0x98, 0x93, 0x30, 0xaa, 0xce, 0x50, 0xb4, + 0x7c, 0x85, 0xbc, 0x0a, 0x96, 0x75, 0x51, 0x03, 0x4e, 0x08, 0x8a, 0x4b, 0x3b, 0x8e, 0xa7, 0xd2, + 0xef, 0x70, 0x13, 0xe9, 0xcb, 0xf7, 0x0e, 0xe6, 0x4e, 0x88, 0x96, 0x75, 0xf0, 0xfd, 0x83, 0xb9, + 0xd3, 0x35, 0xbf, 0x99, 0x01, 0xc1, 0x59, 0xd4, 0xf8, 0xe2, 0x6b, 0x34, 0xfc, 0x76, 0xa7, 0x16, + 0xf8, 0x5b, 0x6e, 0x8b, 0x14, 0xe9, 0x9f, 0xea, 0x06, 0xa6, 0x58, 0x7c, 0x46, 0x19, 0x4e, 0x50, + 0xb3, 0xbf, 0x93, 0x31, 0x41, 0x2c, 0x3d, 0x77, 0xd4, 0x0d, 0x08, 0x6a, 0xc3, 0x78, 0x87, 0x6d, + 0x13, 0x91, 0x50, 0x42, 0xac, 0xf5, 0x17, 0xfa, 0x94, 0xa3, 0xdc, 0xa1, 0x77, 0x87, 0x69, 0x63, + 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x1b, 0x8f, 0xb0, 0x6b, 0xb4, 0xce, 0x85, 0x23, 0xc3, + 0xc2, 0x23, 0x43, 0xbc, 0xc7, 0x66, 0xf3, 0xa5, 0x74, 0xf1, 0xb4, 0x08, 0xaf, 0x0e, 0x2c, 0xeb, + 0xa2, 0x4f, 0xc2, 0x04, 0x7d, 0xde, 0xa8, 0xab, 0x2c, 0x9c, 0x39, 0x99, 0x1f, 0x39, 0x43, 0x61, + 0xe9, 0xc9, 0x66, 0xf4, 0xca, 0x38, 0x41, 0x0c, 0xbd, 0xce, 0x6c, 0x7a, 0x24, 0xe9, 0x52, 0x3f, + 0xa4, 0x75, 0xf3, 0x1d, 0x49, 0x56, 0x23, 0x82, 0xba, 0x70, 0x22, 0x9d, 0x9a, 0x2e, 0x9c, 0xb1, + 0xf3, 0xf9, 0xc4, 0x74, 0x76, 0xb9, 0x38, 0x2b, 0x48, 0x1a, 0x16, 0xe2, 0x2c, 0xfa, 0xe8, 0x3a, + 0x8c, 0x8b, 0x1c, 0xd5, 0x62, 0xe5, 0x96, 0x0d, 0xe1, 0xe1, 0x38, 0xd6, 0x81, 0xf7, 0x93, 0x05, + 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0xac, 0x96, 0x33, 0xea, 0x4a, 0xe0, 0x30, 0x0b, 0x00, 0x97, 0x1d, + 0xa7, 0xda, 0x05, 0xff, 0xf8, 0xbd, 0x83, 0xb9, 0xb3, 0x1b, 0x45, 0x88, 0xb8, 0x98, 0x0e, 0xba, + 0x01, 0xa7, 0xb8, 0xdf, 0x77, 0x95, 0x38, 0xcd, 0x96, 0xeb, 0x29, 0x0e, 0x82, 0x6f, 0xf9, 0x33, + 0xf7, 0x0e, 0xe6, 0x4e, 0x2d, 0x64, 0x21, 
0xe0, 0xec, 0x7a, 0xe8, 0xc3, 0x50, 0x69, 0x7a, 0xa1, + 0x18, 0x83, 0x21, 0x23, 0x2d, 0x57, 0xa5, 0xba, 0x5e, 0x57, 0xdf, 0x1f, 0xff, 0xc1, 0x71, 0x05, + 0xb4, 0xcd, 0x05, 0xcc, 0x4a, 0xec, 0x31, 0x9c, 0x8a, 0x7b, 0x95, 0x94, 0x0c, 0x1a, 0x9e, 0x9f, + 0x5c, 0xb3, 0xa2, 0x1c, 0x22, 0x0c, 0xa7, 0x50, 0x83, 0x30, 0x7a, 0x0d, 0x90, 0x08, 0xff, 0xbe, + 0xd0, 0x60, 0xd9, 0x4a, 0x98, 0x3c, 0x7e, 0xc4, 0xf4, 0x45, 0xac, 0xa7, 0x30, 0x70, 0x46, 0x2d, + 0x74, 0x95, 0x9e, 0x2a, 0x7a, 0xa9, 0x38, 0xb5, 0x54, 0x12, 0xc5, 0x2a, 0xe9, 0x04, 0x84, 0x59, + 0x34, 0x99, 0x14, 0x71, 0xa2, 0x1e, 0x6a, 0xc2, 0x63, 0x4e, 0x37, 0xf2, 0x99, 0xec, 0xde, 0x44, + 0xdd, 0xf0, 0x77, 0x89, 0xc7, 0xd4, 0x66, 0x23, 0x8b, 0xe7, 0x29, 0x8b, 0xb2, 0x50, 0x80, 0x87, + 0x0b, 0xa9, 0x50, 0xd6, 0x52, 0x65, 0x4d, 0x06, 0x33, 0x9a, 0x57, 0x46, 0xe6, 0xe4, 0x17, 0x61, + 0x74, 0xc7, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf1, 0x83, 0x5d, 0x11, 0x95, 0x36, 0x8e, 0xf1, 0x1d, + 0x83, 0xb0, 0x8e, 0x47, 0xdf, 0x8e, 0xcc, 0xa8, 0x63, 0xb5, 0xca, 0xf4, 0xe9, 0x23, 0xf1, 0x19, + 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xab, 0xb5, 0x25, 0xa6, 0x1b, 0x4f, 0xa0, 0xae, 0xd6, + 0x96, 0xb0, 0x84, 0xd3, 0xe5, 0x1a, 0xee, 0x38, 0x01, 0xa9, 0x05, 0x7e, 0x83, 0x84, 0x5a, 0x64, + 0xf9, 0x47, 0x79, 0xcc, 0x5d, 0xba, 0x5c, 0xeb, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x22, 0xe9, 0x7c, + 0x69, 0x13, 0xf9, 0x4a, 0x8d, 0x34, 0x3f, 0xd3, 0x67, 0xca, 0x34, 0x0f, 0xa6, 0x54, 0xa6, 0x36, + 0x1e, 0x65, 0x37, 0x9c, 0x99, 0x64, 0x6b, 0xbb, 0xff, 0x10, 0xbd, 0x4a, 0x4d, 0xb4, 0x9a, 0xa0, + 0x84, 0x53, 0xb4, 0x8d, 0x80, 0x6d, 0x53, 0x3d, 0x03, 0xb6, 0x5d, 0x82, 0x4a, 0xd8, 0xdd, 0x6c, + 0xfa, 0x6d, 0xc7, 0xf5, 0x98, 0x6e, 0x5c, 0x7b, 0xc4, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0x68, 0x05, + 0x46, 0x1c, 0xa9, 0x03, 0x42, 0xf9, 0x21, 0x7a, 0x94, 0xe6, 0x87, 0x47, 0xad, 0x90, 0x5a, 0x1f, + 0x55, 0x17, 0xbd, 0x02, 0xe3, 0xc2, 0x6f, 0x59, 0x24, 0x09, 0x3d, 0x61, 0x3a, 0x97, 0xd5, 0x75, + 0x20, 0x36, 0x71, 0xd1, 0x4d, 0x18, 0x8d, 0xfc, 0x16, 0xf3, 0x90, 0xa2, 0x6c, 0xde, 0xe9, 0xfc, + 0x60, 0x73, 0x1b, 0x0a, 0x4d, 0x17, 0xbf, 0xaa, 0xaa, 0x58, 0xa7, 0x83, 0x36, 0xf8, 0x7a, 0x67, + 0x71, 0xe4, 0x49, 0x38, 0xf3, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0xcd, 0x9b, 0xdb, 0x41, 0xd4, 0xc4, + 0x3a, 0x19, 0x74, 0x05, 0xa6, 0x3b, 0x81, 0xeb, 0xb3, 0x35, 0xa1, 0xd4, 0x7f, 0x33, 0x66, 0xd6, + 0xa8, 0x5a, 0x12, 0x01, 0xa7, 0xeb, 0x30, 0xb7, 0x73, 0x51, 0x38, 0x73, 0x86, 0x67, 0xbe, 0xe0, + 0x6f, 0x42, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb1, 0x93, 0x98, 0x8b, 0x33, 0x66, 0x66, 0xf3, 0xa3, + 0x02, 0xe9, 0x62, 0x0f, 0xce, 0xbc, 0xaa, 0xbf, 0x38, 0xa6, 0x80, 0x9a, 0x5a, 0xc2, 0x49, 0xfa, + 0x62, 0x08, 0x67, 0x1e, 0x2b, 0xb0, 0xac, 0x4b, 0x3c, 0x2f, 0x62, 0x86, 0xc0, 0x28, 0x0e, 0x71, + 0x82, 0x26, 0xfa, 0x28, 0x4c, 0x89, 0x58, 0x86, 0xf1, 0x30, 0x9d, 0x8d, 0xed, 0xce, 0x71, 0x02, + 0x86, 0x53, 0xd8, 0x3c, 0xf3, 0x84, 0xb3, 0xd9, 0x22, 0xe2, 0xe8, 0xbb, 0xee, 0x7a, 0xbb, 0xe1, + 0xcc, 0x39, 0x76, 0x3e, 0x88, 0xcc, 0x13, 0x49, 0x28, 0xce, 0xa8, 0x81, 0x36, 0x60, 0xaa, 0x13, + 0x10, 0xd2, 0x66, 0x8c, 0xbe, 0xb8, 0xcf, 0xe6, 0x78, 0xd4, 0x05, 0xda, 0x93, 0x5a, 0x02, 0x76, + 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b, 0x30, 0xe2, 0xef, 0x91, 0x60, 0x87, 0x38, 0xcd, 0x99, + 0xf3, 0x05, 0x7e, 0x10, 0xe2, 0x72, 0xbb, 0x21, 0x70, 0x13, 0x26, 0x03, 0xb2, 0xb8, 0xb7, 0xc9, + 0x80, 0x6c, 0x0c, 0xfd, 0x88, 0x05, 0x67, 0xa4, 0x96, 0xa1, 0xde, 0xa1, 0xa3, 0xbe, 0xe4, 0x7b, + 0x61, 0x14, 0xf0, 0x38, 0x01, 0x8f, 0xe7, 0xfb, 0xce, 0x6f, 0xe4, 0x54, 0x52, 0x12, 0xd5, 0x33, + 0x79, 0x18, 0x21, 0xce, 0x6f, 0x11, 0x2d, 0xc1, 0x74, 0x48, 0x22, 
0x79, 0x18, 0x2d, 0x84, 0x2b, + 0xaf, 0x57, 0xd7, 0x67, 0x9e, 0xe0, 0x41, 0x0e, 0xe8, 0x66, 0xa8, 0x27, 0x81, 0x38, 0x8d, 0x8f, + 0x2e, 0x43, 0xc9, 0x0f, 0x67, 0xde, 0x5b, 0x90, 0xa3, 0x94, 0x3e, 0xc5, 0xb9, 0xe9, 0xd8, 0x8d, + 0x3a, 0x2e, 0xf9, 0xe1, 0xec, 0xb7, 0xc3, 0x74, 0x8a, 0x63, 0x38, 0x4c, 0x6e, 0x9f, 0xd9, 0x5d, + 0x18, 0x37, 0x66, 0xe5, 0xa1, 0x6a, 0xa9, 0xff, 0xf5, 0x30, 0x54, 0x94, 0x06, 0x13, 0x5d, 0x32, + 0x15, 0xd3, 0x67, 0x92, 0x8a, 0xe9, 0x91, 0x9a, 0xdf, 0x34, 0x74, 0xd1, 0x1b, 0x19, 0xd1, 0xe0, + 0xf2, 0xce, 0x80, 0xfe, 0x0d, 0xe4, 0x35, 0xb1, 0x70, 0xb9, 0x6f, 0x0d, 0xf7, 0x40, 0xa1, 0xa4, + 0xf9, 0x0a, 0x4c, 0x7b, 0x3e, 0x63, 0x53, 0x49, 0x53, 0xf2, 0x20, 0x8c, 0xd5, 0xa8, 0xe8, 0xe1, + 0x55, 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0x85, 0xa4, 0x68, 0x9b, 0xb3, 0x12, 0x58, + 0x40, 0xd1, 0x13, 0x30, 0xd8, 0xf1, 0x9b, 0xab, 0x35, 0xc1, 0xa2, 0x6a, 0x31, 0x48, 0x9b, 0xab, + 0x35, 0xcc, 0x61, 0x68, 0x01, 0x86, 0xd8, 0x8f, 0x70, 0x66, 0x2c, 0x3f, 0x8e, 0x06, 0xab, 0xa1, + 0x65, 0x4e, 0x62, 0x15, 0xb0, 0xa8, 0xc8, 0x44, 0x6c, 0x94, 0xaf, 0x67, 0x22, 0xb6, 0xe1, 0x07, + 0x14, 0xb1, 0x49, 0x02, 0x38, 0xa6, 0x85, 0xee, 0xc2, 0x29, 0xe3, 0x2d, 0xc5, 0x97, 0x08, 0x09, + 0x85, 0x2f, 0xff, 0x13, 0x85, 0x8f, 0x28, 0xa1, 0x11, 0x3f, 0x2b, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, + 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xba, 0x91, 0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0x84, 0xa6, + 0x5b, 0x4c, 0x13, 0x46, 0xaf, 0xc0, 0xc8, 0x9b, 0x7e, 0xc8, 0x8e, 0x77, 0xc1, 0x56, 0x4b, 0x47, + 0xf0, 0x91, 0xd7, 0x6f, 0xd4, 0x59, 0xf9, 0xfd, 0x83, 0xb9, 0xd1, 0x9a, 0xdf, 0x94, 0x7f, 0xb1, + 0xaa, 0x80, 0xbe, 0xdf, 0x82, 0xd9, 0xf4, 0x63, 0x4d, 0x75, 0x7a, 0xbc, 0xff, 0x4e, 0xdb, 0xa2, + 0xd1, 0xd9, 0xe5, 0x5c, 0x72, 0xb8, 0xa0, 0x29, 0xfb, 0x97, 0x2c, 0x26, 0xa8, 0x13, 0x9a, 0x26, + 0x12, 0x76, 0x5b, 0xc7, 0x91, 0x30, 0x76, 0xd9, 0x50, 0x82, 0x3d, 0xb0, 0x85, 0xc4, 0x3f, 0xb7, + 0x98, 0x85, 0xc4, 0x31, 0xba, 0x42, 0xbc, 0x0e, 0x23, 0x91, 0x4c, 0xe4, 0x5b, 0x90, 0xe3, 0x56, + 0xeb, 0x14, 0xb3, 0x12, 0x51, 0x4c, 0xae, 0xca, 0xd9, 0xab, 0xc8, 0xd8, 0xff, 0x88, 0xcf, 0x80, + 0x84, 0x1c, 0x83, 0xae, 0xa1, 0x6a, 0xea, 0x1a, 0xe6, 0x7a, 0x7c, 0x41, 0x8e, 0xce, 0xe1, 0x1f, + 0x9a, 0xfd, 0x66, 0xc2, 0x9d, 0x77, 0xba, 0x69, 0x8e, 0xfd, 0x79, 0x0b, 0x20, 0x0e, 0xf1, 0xdc, + 0x47, 0xaa, 0xb6, 0x97, 0x28, 0x5b, 0xeb, 0x47, 0x7e, 0xc3, 0x6f, 0x09, 0x4d, 0xda, 0x63, 0xb1, + 0xba, 0x83, 0x97, 0xdf, 0xd7, 0x7e, 0x63, 0x85, 0x8d, 0xe6, 0x64, 0x40, 0xb9, 0x72, 0xac, 0x80, + 0x33, 0x82, 0xc9, 0x7d, 0xd1, 0x82, 0x93, 0x59, 0x76, 0xb5, 0xf4, 0x91, 0xc4, 0xc5, 0x5c, 0xca, + 0x6c, 0x4a, 0xcd, 0xe6, 0x2d, 0x51, 0x8e, 0x15, 0x46, 0xdf, 0x39, 0xf0, 0x0e, 0x17, 0x5b, 0xf9, + 0x06, 0x8c, 0xd7, 0x02, 0xa2, 0x5d, 0xae, 0xaf, 0xf2, 0x20, 0x05, 0xbc, 0x3f, 0xcf, 0x1c, 0x3a, + 0x40, 0x81, 0xfd, 0xe5, 0x12, 0x9c, 0xe4, 0xd6, 0x07, 0x0b, 0x7b, 0xbe, 0xdb, 0xac, 0xf9, 0x4d, + 0xe1, 0x3d, 0xf5, 0x09, 0x18, 0xeb, 0x68, 0xb2, 0xc9, 0xa2, 0x38, 0xa1, 0xba, 0x0c, 0x33, 0x96, + 0xa6, 0xe8, 0xa5, 0xd8, 0xa0, 0x85, 0x9a, 0x30, 0x46, 0xf6, 0xdc, 0x86, 0x52, 0x61, 0x97, 0x0e, + 0x7d, 0xd1, 0xa9, 0x56, 0x96, 0x35, 0x3a, 0xd8, 0xa0, 0xfa, 0x10, 0x32, 0x53, 0xdb, 0x3f, 0x66, + 0xc1, 0x23, 0x39, 0x51, 0x45, 0x69, 0x73, 0x77, 0x98, 0x9d, 0x87, 0x58, 0xb6, 0xaa, 0x39, 0x6e, + 0xfd, 0x81, 0x05, 0x14, 0x7d, 0x0c, 0x80, 0x5b, 0x6f, 0xd0, 0x57, 0x7a, 0xaf, 0xf0, 0x8b, 0x46, + 0xe4, 0x38, 0x2d, 0x08, 0x98, 0xac, 0x8f, 0x35, 0x5a, 0xf6, 0x17, 0x07, 0x60, 0x90, 0x67, 0xd1, + 0xaf, 0xc1, 0xf0, 0x0e, 0xcf, 0x13, 0x53, 0x38, 0x6f, 0x14, 0x57, 0xa6, 0x9e, 0x89, 0xe7, 
0x4d, + 0x2b, 0xc5, 0x92, 0x0c, 0x5a, 0x83, 0x13, 0x3c, 0x5d, 0x4f, 0xab, 0x4a, 0x5a, 0xce, 0xbe, 0x14, + 0xfb, 0xf1, 0xdc, 0xb2, 0x4a, 0xfc, 0xb9, 0x9a, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xab, 0x30, 0x41, + 0x9f, 0x61, 0x7e, 0x37, 0x92, 0x94, 0x78, 0xa2, 0x1e, 0xf5, 0xee, 0xdb, 0x30, 0xa0, 0x38, 0x81, + 0x8d, 0x5e, 0x81, 0xf1, 0x4e, 0x4a, 0xc0, 0x39, 0x18, 0x4b, 0x02, 0x4c, 0xa1, 0xa6, 0x89, 0xcb, + 0x4c, 0x6b, 0xbb, 0xcc, 0x90, 0x78, 0x63, 0x27, 0x20, 0xe1, 0x8e, 0xdf, 0x6a, 0x32, 0xf6, 0x6f, + 0x50, 0x33, 0xad, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0xa5, 0xb2, 0xe5, 0xb8, 0xad, 0x6e, 0x40, 0x62, + 0x2a, 0x43, 0x26, 0x95, 0x95, 0x04, 0x1c, 0xa7, 0x6a, 0xf4, 0x96, 0xdc, 0x0e, 0x1f, 0x8d, 0xe4, + 0xd6, 0xfe, 0x5b, 0x25, 0x30, 0xa6, 0xf6, 0x5b, 0x37, 0x81, 0x10, 0xfd, 0xb2, 0xed, 0xa0, 0xd3, + 0x10, 0x96, 0x31, 0x99, 0x5f, 0x16, 0xe7, 0x05, 0xe5, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, + 0xf1, 0x53, 0xb5, 0xc0, 0xa7, 0x97, 0x9c, 0x0c, 0x63, 0xa5, 0x2c, 0xd8, 0x87, 0xa5, 0x77, 0x6f, + 0x41, 0xc0, 0x47, 0x61, 0xe3, 0xcb, 0x29, 0x18, 0x46, 0x24, 0x75, 0xe1, 0x6b, 0x2f, 0xa9, 0xa0, + 0xcb, 0x30, 0x2a, 0xb2, 0xc2, 0x30, 0x43, 0x6b, 0xbe, 0x99, 0x98, 0xd1, 0x4b, 0x35, 0x2e, 0xc6, + 0x3a, 0x8e, 0xfd, 0x03, 0x25, 0x38, 0x91, 0xe1, 0x29, 0xc3, 0xaf, 0x91, 0x6d, 0x37, 0x8c, 0x54, + 0xea, 0x51, 0xed, 0x1a, 0xe1, 0xe5, 0x58, 0x61, 0xd0, 0xb3, 0x8a, 0x5f, 0x54, 0xc9, 0xcb, 0x49, + 0x58, 0xa2, 0x0b, 0xe8, 0x21, 0x93, 0x78, 0x9e, 0x87, 0x81, 0x6e, 0x48, 0x64, 0xa8, 0x56, 0x75, + 0x6d, 0x33, 0xb5, 0x26, 0x83, 0xd0, 0x67, 0xd4, 0xb6, 0xd2, 0x10, 0x6a, 0xcf, 0x28, 0xae, 0x23, + 0xe4, 0x30, 0xda, 0xb9, 0x88, 0x78, 0x8e, 0x17, 0x89, 0xc7, 0x56, 0x1c, 0x73, 0x90, 0x95, 0x62, + 0x01, 0xb5, 0xbf, 0x50, 0x86, 0x33, 0xb9, 0xbe, 0x73, 0xb4, 0xeb, 0x6d, 0xdf, 0x73, 0x23, 0x5f, + 0x59, 0x13, 0xf1, 0x38, 0x83, 0xa4, 0xb3, 0xb3, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0x90, + 0x09, 0x45, 0x53, 0x49, 0x58, 0x17, 0xab, 0x3c, 0xf0, 0x14, 0x07, 0xf7, 0x9d, 0x37, 0xfb, 0x09, + 0xca, 0xc1, 0xf8, 0xad, 0xe4, 0x85, 0x42, 0xbb, 0xeb, 0xfb, 0x2d, 0xcc, 0x80, 0xe8, 0x7d, 0x62, + 0xbc, 0x12, 0xe6, 0x33, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x68, 0x4f, 0xc1, 0xf0, 0x2e, 0xd9, 0x0f, + 0x5c, 0x6f, 0x3b, 0x69, 0x56, 0x75, 0x8d, 0x17, 0x63, 0x09, 0x37, 0xb3, 0x06, 0x0e, 0x1f, 0x75, + 0xc2, 0xeb, 0x91, 0x9e, 0xec, 0xc9, 0x0f, 0x95, 0x61, 0x12, 0x2f, 0x56, 0xdf, 0x9d, 0x88, 0x9b, + 0xe9, 0x89, 0x38, 0xea, 0x84, 0xd7, 0xbd, 0x67, 0xe3, 0xe7, 0x2d, 0x98, 0x64, 0xb9, 0x69, 0x84, + 0x87, 0xbc, 0xeb, 0x7b, 0xc7, 0xf0, 0x14, 0x78, 0x02, 0x06, 0x03, 0xda, 0x68, 0x32, 0xfb, 0x2a, + 0xeb, 0x09, 0xe6, 0x30, 0xf4, 0x18, 0x0c, 0xb0, 0x2e, 0xd0, 0xc9, 0x1b, 0xe3, 0x47, 0x70, 0xd5, + 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xd8, 0x25, 0x4c, 0x3a, 0x2d, 0x97, 0x77, 0x3a, 0x56, 0x59, 0xbf, + 0x33, 0xbc, 0xea, 0x33, 0xbb, 0xf6, 0xf6, 0xc2, 0x2e, 0x65, 0x93, 0x2c, 0x7e, 0x66, 0xff, 0x51, + 0x09, 0xce, 0x65, 0xd6, 0xeb, 0x3b, 0xec, 0x52, 0x71, 0xed, 0x87, 0x99, 0x7d, 0xa4, 0x7c, 0x8c, + 0x46, 0xab, 0x03, 0xfd, 0x72, 0xff, 0x83, 0x7d, 0x44, 0x43, 0xca, 0x1c, 0xb2, 0x77, 0x48, 0x34, + 0xa4, 0xcc, 0xbe, 0xe5, 0x88, 0x09, 0xfe, 0xbc, 0x94, 0xf3, 0x2d, 0x4c, 0x60, 0x70, 0x91, 0x9e, + 0x33, 0x0c, 0x18, 0xca, 0x47, 0x38, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xb4, 0x00, 0x93, 0x6d, + 0xd7, 0xa3, 0x87, 0xcf, 0xbe, 0xc9, 0x8a, 0xab, 0x60, 0x75, 0x6b, 0x26, 0x18, 0x27, 0xf1, 0x91, + 0xab, 0x45, 0x4a, 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x76, 0xdd, 0xbc, 0xa9, 0xce, 0x57, 0xa3, 0x98, + 0x11, 0x35, 0x69, 0x4d, 0x93, 0x13, 0x95, 0xfb, 0x97, 0x13, 0x8d, 0x65, 0xcb, 0x88, 0x66, 0x5f, + 0x81, 0xf1, 0x07, 
0x56, 0x0c, 0xd8, 0x5f, 0x2b, 0xc3, 0xa3, 0x05, 0xdb, 0x9e, 0x9f, 0xf5, 0xc6, + 0x1c, 0x68, 0x67, 0x7d, 0x6a, 0x1e, 0x6a, 0x70, 0x72, 0xab, 0xdb, 0x6a, 0xed, 0x33, 0x5f, 0x0e, + 0xd2, 0x94, 0x18, 0x82, 0xa7, 0x94, 0xc2, 0x91, 0x93, 0x2b, 0x19, 0x38, 0x38, 0xb3, 0x26, 0x7d, + 0x62, 0xd1, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0x3c, 0xb1, 0xb0, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x02, + 0xd3, 0xce, 0x9e, 0xe3, 0xf2, 0x70, 0xd3, 0x92, 0x00, 0x7f, 0x63, 0x29, 0x79, 0xee, 0x42, 0x12, + 0x01, 0xa7, 0xeb, 0xa0, 0xd7, 0x00, 0xf9, 0x9b, 0xcc, 0xe2, 0xbb, 0x79, 0x85, 0x78, 0x42, 0xeb, + 0xca, 0xe6, 0xae, 0x1c, 0x1f, 0x09, 0x37, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x22, 0xd0, 0x50, + 0x7e, 0x44, 0xa0, 0xe2, 0x73, 0xb1, 0x67, 0xe2, 0x9b, 0xff, 0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, + 0x66, 0x00, 0xcd, 0x57, 0x98, 0xd5, 0x24, 0x97, 0xf5, 0x6a, 0xf1, 0x53, 0x4e, 0x69, 0x56, 0x93, + 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x84, 0xb1, 0xdb, 0xae, 0xc1, 0xe2, 0x8b, 0x28, 0x5f, 0x0a, + 0x03, 0x7d, 0x1c, 0x86, 0x9b, 0xee, 0x9e, 0x1b, 0x0a, 0x49, 0xd7, 0xa1, 0xd5, 0x4a, 0xf1, 0x39, + 0x58, 0xe5, 0x64, 0xb0, 0xa4, 0x67, 0xff, 0x50, 0x09, 0xc6, 0x65, 0x8b, 0xaf, 0x77, 0xfd, 0xc8, + 0x39, 0x86, 0x6b, 0xf9, 0x8a, 0x71, 0x2d, 0xbf, 0xaf, 0x28, 0xd4, 0x19, 0xeb, 0x52, 0xee, 0x75, + 0x7c, 0x23, 0x71, 0x1d, 0x3f, 0xd9, 0x9b, 0x54, 0xf1, 0x35, 0xfc, 0x8f, 0x2d, 0x98, 0x36, 0xf0, + 0x8f, 0xe1, 0x36, 0x58, 0x31, 0x6f, 0x83, 0xc7, 0x7b, 0x7e, 0x43, 0xce, 0x2d, 0xf0, 0xbd, 0xe5, + 0x44, 0xdf, 0xd9, 0xe9, 0xff, 0x26, 0x0c, 0xec, 0x38, 0x41, 0xb3, 0x28, 0xb5, 0x43, 0xaa, 0xd2, + 0xfc, 0x55, 0x27, 0x10, 0x6a, 0xe7, 0x67, 0xe4, 0xa8, 0xd3, 0xa2, 0x9e, 0x2a, 0x67, 0xd6, 0x14, + 0x7a, 0x09, 0x86, 0xc2, 0x86, 0xdf, 0x51, 0x9e, 0x1c, 0xe7, 0xd9, 0x40, 0xb3, 0x92, 0xfb, 0x07, + 0x73, 0xc8, 0x6c, 0x8e, 0x16, 0x63, 0x81, 0x8f, 0x3e, 0x01, 0xe3, 0xec, 0x97, 0xb2, 0x01, 0x2b, + 0xe7, 0x8b, 0x23, 0xea, 0x3a, 0x22, 0x37, 0x90, 0x34, 0x8a, 0xb0, 0x49, 0x6a, 0x76, 0x1b, 0x2a, + 0xea, 0xb3, 0x1e, 0xaa, 0xde, 0xf6, 0xdf, 0x97, 0xe1, 0x44, 0xc6, 0x9a, 0x43, 0xa1, 0x31, 0x13, + 0x97, 0xfb, 0x5c, 0xaa, 0x6f, 0x73, 0x2e, 0x42, 0xf6, 0x1a, 0x6a, 0x8a, 0xb5, 0xd5, 0x77, 0xa3, + 0x37, 0x43, 0x92, 0x6c, 0x94, 0x16, 0xf5, 0x6e, 0x94, 0x36, 0x76, 0x6c, 0x43, 0x4d, 0x1b, 0x52, + 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xa7, 0x65, 0x38, 0x99, 0x15, 0x7d, 0x11, 0x7d, 0x36, 0x91, 0x58, + 0xf4, 0x85, 0x7e, 0xe3, 0x36, 0xf2, 0x6c, 0xa3, 0x22, 0x20, 0xdc, 0xbc, 0x99, 0x6a, 0xb4, 0xe7, + 0x30, 0x8b, 0x36, 0x59, 0x48, 0x8a, 0x80, 0x27, 0x84, 0x95, 0xc7, 0xc7, 0x07, 0xfb, 0xee, 0x80, + 0xc8, 0x24, 0x1b, 0x26, 0xec, 0x4b, 0x64, 0x71, 0x6f, 0xfb, 0x12, 0xd9, 0xf2, 0xac, 0x0b, 0xa3, + 0xda, 0xd7, 0x3c, 0xd4, 0x19, 0xdf, 0xa5, 0xb7, 0x95, 0xd6, 0xef, 0x87, 0x3a, 0xeb, 0x3f, 0x66, + 0x41, 0xc2, 0xe5, 0x40, 0x89, 0xc5, 0xac, 0x5c, 0xb1, 0xd8, 0x79, 0x18, 0x08, 0xfc, 0x16, 0x49, + 0x66, 0xe0, 0xc4, 0x7e, 0x8b, 0x60, 0x06, 0xa1, 0x18, 0x51, 0x2c, 0xec, 0x18, 0xd3, 0x1f, 0x72, + 0xe2, 0x89, 0xf6, 0x04, 0x0c, 0xb6, 0xc8, 0x1e, 0x69, 0x25, 0x13, 0x25, 0x5d, 0xa7, 0x85, 0x98, + 0xc3, 0xec, 0x9f, 0x1f, 0x80, 0xb3, 0x85, 0x41, 0x5d, 0xe8, 0x73, 0x68, 0xdb, 0x89, 0xc8, 0x1d, + 0x67, 0x3f, 0x99, 0xd1, 0xe4, 0x0a, 0x2f, 0xc6, 0x12, 0xce, 0x3c, 0xc9, 0x78, 0x60, 0xf2, 0x84, + 0x10, 0x51, 0xc4, 0x23, 0x17, 0x50, 0x53, 0x28, 0x55, 0x3e, 0x0a, 0xa1, 0xd4, 0x73, 0x00, 0x61, + 0xd8, 0xe2, 0x86, 0x59, 0x4d, 0xe1, 0xa2, 0x16, 0x07, 0xb0, 0xaf, 0x5f, 0x17, 0x10, 0xac, 0x61, + 0xa1, 0x2a, 0x4c, 0x75, 0x02, 0x3f, 0xe2, 0x32, 0xd9, 0x2a, 0xb7, 0x5d, 0x1c, 0x34, 0xe3, 0x69, + 0xd4, 0x12, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 
0x84, 0x51, 0x11, 0x63, 0xa3, 0xe6, 0xfb, 0x2d, 0x21, + 0x06, 0x52, 0xe6, 0x7c, 0xf5, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xf4, 0x0e, 0x67, 0x56, + 0xe3, 0xc2, 0x5e, 0x0d, 0x2f, 0x11, 0x89, 0x75, 0xa4, 0xaf, 0x48, 0xac, 0xb1, 0x60, 0xac, 0xd2, + 0xb7, 0xde, 0x11, 0x7a, 0x8a, 0x92, 0x7e, 0x76, 0x00, 0x4e, 0x88, 0x85, 0xf3, 0xb0, 0x97, 0xcb, + 0xcd, 0xf4, 0x72, 0x39, 0x0a, 0xd1, 0xd9, 0xbb, 0x6b, 0xe6, 0xb8, 0xd7, 0xcc, 0x0f, 0x5b, 0x60, + 0xb2, 0x57, 0xe8, 0xff, 0xcb, 0x4d, 0x09, 0xf5, 0x62, 0x2e, 0xbb, 0xa6, 0xa2, 0x7a, 0xbe, 0xcd, + 0xe4, 0x50, 0xf6, 0x7f, 0xb4, 0xe0, 0xf1, 0x9e, 0x14, 0xd1, 0x32, 0x54, 0x18, 0x0f, 0xa8, 0xbd, + 0xce, 0x9e, 0x54, 0xb6, 0xcd, 0x12, 0x90, 0xc3, 0x92, 0xc6, 0x35, 0xd1, 0x72, 0x2a, 0xf7, 0xd6, + 0x53, 0x19, 0xb9, 0xb7, 0x4e, 0x19, 0xc3, 0xf3, 0x80, 0xc9, 0xb7, 0x7e, 0x90, 0xde, 0x38, 0x86, + 0x5f, 0x11, 0xfa, 0xa0, 0x21, 0xf6, 0xb3, 0x13, 0x62, 0x3f, 0x64, 0x62, 0x6b, 0x77, 0xc8, 0x47, + 0x61, 0x8a, 0x05, 0xdf, 0x62, 0x96, 0xf6, 0xc2, 0xe3, 0xa9, 0x14, 0x5b, 0xd3, 0x5e, 0x4f, 0xc0, + 0x70, 0x0a, 0xdb, 0xfe, 0xc3, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x6f, 0xc2, 0xa7, 0xa1, 0xe2, + 0xb6, 0xdb, 0x5d, 0x9e, 0x4e, 0x69, 0x90, 0xfb, 0x46, 0xd3, 0x79, 0x5a, 0x95, 0x85, 0x38, 0x86, + 0xa3, 0x15, 0x21, 0x71, 0x2e, 0x88, 0xef, 0xc9, 0x3b, 0x3e, 0x5f, 0x75, 0x22, 0x87, 0x33, 0x38, + 0xea, 0x9e, 0x8d, 0x65, 0xd3, 0xe8, 0x53, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x36, 0x2d, 0x13, 0x61, + 0x85, 0xdf, 0x5f, 0x40, 0xad, 0xae, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, + 0xf3, 0xc6, 0x4d, 0x3f, 0x9b, 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0xfd, 0x10, 0x54, 0x14, + 0xf1, 0x5e, 0xf2, 0xa7, 0x31, 0x9d, 0x2d, 0xfa, 0x08, 0x4c, 0x26, 0xfa, 0x76, 0x28, 0xf1, 0xd5, + 0x2f, 0x58, 0x30, 0xc9, 0x3b, 0xb3, 0xec, 0xed, 0x89, 0xdb, 0xe0, 0x2d, 0x38, 0xd9, 0xca, 0x38, + 0x95, 0xc5, 0xf4, 0xf7, 0x7f, 0x8a, 0x2b, 0x71, 0x55, 0x16, 0x14, 0x67, 0xb6, 0x81, 0x2e, 0xd2, + 0x1d, 0x47, 0x4f, 0x5d, 0xa7, 0x25, 0x5c, 0xa5, 0xc7, 0xf8, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, + 0xdf, 0xb1, 0x60, 0x9a, 0xf7, 0xfc, 0x1a, 0xd9, 0x57, 0x67, 0xd3, 0x37, 0xb2, 0xef, 0x22, 0x91, + 0x5f, 0x29, 0x27, 0x91, 0x9f, 0xfe, 0x69, 0xe5, 0xc2, 0x4f, 0xfb, 0xb2, 0x05, 0x62, 0x85, 0x1c, + 0x83, 0x10, 0xe2, 0xdb, 0x4d, 0x21, 0xc4, 0x6c, 0xfe, 0x26, 0xc8, 0x91, 0x3e, 0xfc, 0x99, 0x05, + 0x53, 0x1c, 0x21, 0xd6, 0x96, 0x7f, 0x43, 0xe7, 0xa1, 0x9f, 0x74, 0xdf, 0xd7, 0xc8, 0xfe, 0x86, + 0x5f, 0x73, 0xa2, 0x9d, 0xec, 0x8f, 0x32, 0x26, 0x6b, 0xa0, 0x70, 0xb2, 0x9a, 0x72, 0x03, 0x19, + 0x79, 0x6e, 0x7a, 0xc4, 0x8f, 0x38, 0x6c, 0x9e, 0x1b, 0xfb, 0xeb, 0x16, 0x20, 0xde, 0x8c, 0xc1, + 0xb8, 0x51, 0x76, 0x88, 0x95, 0x6a, 0x17, 0x5d, 0x7c, 0x34, 0x29, 0x08, 0xd6, 0xb0, 0x8e, 0x64, + 0x78, 0x12, 0x26, 0x0f, 0xe5, 0xde, 0x26, 0x0f, 0x87, 0x18, 0xd1, 0x7f, 0x33, 0x04, 0x49, 0xdf, + 0x2a, 0x74, 0x0b, 0xc6, 0x1a, 0x4e, 0xc7, 0xd9, 0x74, 0x5b, 0x6e, 0xe4, 0x92, 0xb0, 0xc8, 0x1e, + 0x6a, 0x49, 0xc3, 0x13, 0x4a, 0x6a, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x27, 0x70, 0xf7, + 0xdc, 0x16, 0xd9, 0x66, 0xb2, 0x12, 0x16, 0x9c, 0x81, 0x1b, 0x67, 0xc9, 0x52, 0xac, 0x61, 0x64, + 0x38, 0xb2, 0x97, 0x1f, 0xb2, 0x23, 0x3b, 0x1c, 0x9b, 0x23, 0xfb, 0xc0, 0xa1, 0x1c, 0xd9, 0x47, + 0x0e, 0xed, 0xc8, 0x3e, 0xd8, 0x97, 0x23, 0x3b, 0x86, 0xd3, 0x92, 0xf7, 0xa4, 0xff, 0x57, 0xdc, + 0x16, 0x11, 0x0f, 0x0e, 0x1e, 0x51, 0x62, 0xf6, 0xde, 0xc1, 0xdc, 0x69, 0x9c, 0x89, 0x81, 0x73, + 0x6a, 0xa2, 0x8f, 0xc1, 0x8c, 0xd3, 0x6a, 0xf9, 0x77, 0xd4, 0xa4, 0x2e, 0x87, 0x0d, 0xa7, 0xc5, + 0x95, 0x10, 0xc3, 0x8c, 0xea, 0x63, 0xf7, 0x0e, 0xe6, 0x66, 0x16, 
0x72, 0x70, 0x70, 0x6e, 0x6d, + 0xf4, 0x61, 0xa8, 0x74, 0x02, 0xbf, 0xb1, 0xa6, 0x39, 0x80, 0x9e, 0xa3, 0x03, 0x58, 0x93, 0x85, + 0xf7, 0x0f, 0xe6, 0xc6, 0xd5, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x3c, 0xd3, 0x47, 0x8f, 0xd4, + 0x33, 0x7d, 0x17, 0x4e, 0xd4, 0x49, 0xe0, 0x3a, 0x2d, 0xf7, 0x2d, 0xca, 0x2f, 0xcb, 0xf3, 0x69, + 0x03, 0x2a, 0x41, 0xe2, 0x44, 0xee, 0x2b, 0xe6, 0xa6, 0x96, 0x70, 0x44, 0x9e, 0xc0, 0x31, 0x21, + 0xfb, 0x7f, 0x5b, 0x30, 0x2c, 0x7c, 0xa9, 0x8e, 0x81, 0x6b, 0x5c, 0x30, 0x34, 0x09, 0x73, 0xd9, + 0x03, 0xc6, 0x3a, 0x93, 0xab, 0x43, 0x58, 0x4d, 0xe8, 0x10, 0x1e, 0x2f, 0x22, 0x52, 0xac, 0x3d, + 0xf8, 0x6b, 0x65, 0xca, 0xbd, 0x1b, 0x5e, 0xbd, 0x0f, 0x7f, 0x08, 0xd6, 0x61, 0x38, 0x14, 0x5e, + 0xa5, 0xa5, 0x7c, 0x9f, 0x86, 0xe4, 0x24, 0xc6, 0x76, 0x6c, 0xc2, 0x8f, 0x54, 0x12, 0xc9, 0x74, + 0x57, 0x2d, 0x3f, 0x44, 0x77, 0xd5, 0x5e, 0x7e, 0xcf, 0x03, 0x47, 0xe1, 0xf7, 0x6c, 0x7f, 0x95, + 0xdd, 0x9c, 0x7a, 0xf9, 0x31, 0x30, 0x55, 0x57, 0xcc, 0x3b, 0xd6, 0x2e, 0x58, 0x59, 0xa2, 0x53, + 0x39, 0xcc, 0xd5, 0xcf, 0x59, 0x70, 0x36, 0xe3, 0xab, 0x34, 0x4e, 0xeb, 0x19, 0x18, 0x71, 0xba, + 0x4d, 0x57, 0xed, 0x65, 0x4d, 0x9f, 0xb8, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0xee, + 0x76, 0x5c, 0xae, 0x4a, 0xd5, 0xcd, 0x7f, 0xcb, 0xdc, 0x01, 0x6f, 0x39, 0x09, 0xc4, 0x69, 0x7c, + 0x15, 0x6b, 0xa6, 0x9c, 0x1b, 0x6b, 0xe6, 0xef, 0x5a, 0x30, 0xaa, 0xfc, 0x2a, 0x1f, 0xfa, 0x68, + 0x7f, 0xd4, 0x1c, 0xed, 0x47, 0x0b, 0x46, 0x3b, 0x67, 0x98, 0x7f, 0xab, 0xa4, 0xfa, 0x5b, 0xf3, + 0x83, 0xa8, 0x0f, 0x0e, 0xee, 0xc1, 0x5d, 0x17, 0x2e, 0xc3, 0xa8, 0xd3, 0xe9, 0x48, 0x80, 0xb4, + 0x41, 0x63, 0x11, 0x94, 0xe3, 0x62, 0xac, 0xe3, 0x28, 0x4f, 0x8a, 0x72, 0xae, 0x27, 0x45, 0x13, + 0x20, 0x72, 0x82, 0x6d, 0x12, 0xd1, 0x32, 0x61, 0x32, 0x9b, 0x7f, 0xde, 0x74, 0x23, 0xb7, 0x35, + 0xef, 0x7a, 0x51, 0x18, 0x05, 0xf3, 0xab, 0x5e, 0x74, 0x23, 0xe0, 0x4f, 0x48, 0x2d, 0x5a, 0x93, + 0xa2, 0x85, 0x35, 0xba, 0x32, 0x86, 0x00, 0x6b, 0x63, 0xd0, 0x34, 0x66, 0x58, 0x17, 0xe5, 0x58, + 0x61, 0xd8, 0x1f, 0x62, 0xb7, 0x0f, 0x1b, 0xd3, 0xc3, 0x45, 0x2a, 0xfa, 0xfb, 0x63, 0x6a, 0x36, + 0x98, 0x26, 0xb3, 0xaa, 0xc7, 0x43, 0x2a, 0x3e, 0xec, 0x69, 0xc3, 0xba, 0x5f, 0x5f, 0x1c, 0x34, + 0x09, 0x7d, 0x47, 0xca, 0x40, 0xe5, 0xd9, 0x1e, 0xb7, 0xc6, 0x21, 0x4c, 0x52, 0x58, 0x3a, 0x15, + 0x96, 0x6c, 0x62, 0xb5, 0x26, 0xf6, 0x85, 0x96, 0x4e, 0x45, 0x00, 0x70, 0x8c, 0x43, 0x99, 0x29, + 0xf5, 0x27, 0x9c, 0x41, 0x71, 0x58, 0x51, 0x85, 0x1d, 0x62, 0x0d, 0x03, 0x5d, 0x12, 0x02, 0x05, + 0xae, 0x17, 0x78, 0x34, 0x21, 0x50, 0x90, 0xc3, 0xa5, 0x49, 0x81, 0x2e, 0xc3, 0xa8, 0xca, 0xa0, + 0x5d, 0xe3, 0x89, 0x8c, 0xc4, 0x32, 0x5b, 0x8e, 0x8b, 0xb1, 0x8e, 0x83, 0x36, 0x60, 0x32, 0xe4, + 0x72, 0x36, 0x15, 0xeb, 0x99, 0xcb, 0x2b, 0xdf, 0x2f, 0xad, 0x80, 0xea, 0x26, 0xf8, 0x3e, 0x2b, + 0xe2, 0xa7, 0x93, 0xf4, 0xf3, 0x4f, 0x92, 0x40, 0xaf, 0xc2, 0x44, 0xcb, 0x77, 0x9a, 0x8b, 0x4e, + 0xcb, 0xf1, 0x1a, 0x6c, 0x7c, 0x46, 0xcc, 0x44, 0xac, 0xd7, 0x0d, 0x28, 0x4e, 0x60, 0x53, 0xe6, + 0x4d, 0x2f, 0x11, 0xf1, 0xc9, 0x1d, 0x6f, 0x9b, 0x84, 0x22, 0x1f, 0x32, 0x63, 0xde, 0xae, 0xe7, + 0xe0, 0xe0, 0xdc, 0xda, 0xe8, 0x25, 0x18, 0x93, 0x9f, 0xaf, 0x85, 0xc5, 0x88, 0x9d, 0x52, 0x34, + 0x18, 0x36, 0x30, 0xd1, 0x1d, 0x38, 0x25, 0xff, 0x6f, 0x04, 0xce, 0xd6, 0x96, 0xdb, 0x10, 0xbe, + 0xe2, 0xdc, 0x7b, 0x75, 0x41, 0xba, 0x58, 0x2e, 0x67, 0x21, 0xdd, 0x3f, 0x98, 0x3b, 0x2f, 0x46, + 0x2d, 0x13, 0xce, 0x26, 0x31, 0x9b, 0x3e, 0x5a, 0x83, 0x13, 0x3b, 0xc4, 0x69, 0x45, 0x3b, 0x4b, + 0x3b, 0xa4, 0xb1, 0x2b, 0x37, 0x1d, 0x0b, 0xb6, 0xa1, 0x39, 0x70, 0x5c, 0x4d, 0xa3, 0xe0, 
0xac, + 0x7a, 0xe8, 0x0d, 0x98, 0xe9, 0x74, 0x37, 0x5b, 0x6e, 0xb8, 0xb3, 0xee, 0x47, 0xcc, 0x14, 0x48, + 0x25, 0xe4, 0x16, 0x51, 0x39, 0x54, 0x38, 0x93, 0x5a, 0x0e, 0x1e, 0xce, 0xa5, 0x80, 0xde, 0x82, + 0x53, 0x89, 0xc5, 0x20, 0xe2, 0x12, 0x4c, 0xe4, 0x67, 0x7b, 0xa8, 0x67, 0x55, 0x10, 0x21, 0x3e, + 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xf4, 0x32, 0x80, 0xdb, 0x59, 0x71, 0xda, 0x6e, 0x8b, 0x3e, 0x17, + 0x4f, 0xb0, 0x75, 0x42, 0x9f, 0x0e, 0xb0, 0x5a, 0x93, 0xa5, 0xf4, 0x7c, 0x16, 0xff, 0xf6, 0xb1, + 0x86, 0x8d, 0x6a, 0x30, 0x21, 0xfe, 0xed, 0x8b, 0x69, 0x9d, 0x56, 0x21, 0x00, 0x26, 0x64, 0x0d, + 0x35, 0x97, 0xc8, 0x2c, 0x61, 0xb3, 0x97, 0xa8, 0x8f, 0xb6, 0xe1, 0xac, 0xcc, 0xde, 0xa5, 0xaf, + 0x53, 0x39, 0x0f, 0x21, 0x4b, 0xb3, 0x30, 0xc2, 0xfd, 0x43, 0x16, 0x8a, 0x10, 0x71, 0x31, 0x1d, + 0x7a, 0xbf, 0xeb, 0xcb, 0x9d, 0x7b, 0xd0, 0x9e, 0xe2, 0xe6, 0x49, 0xf4, 0x7e, 0xbf, 0x9e, 0x04, + 0xe2, 0x34, 0x3e, 0x0a, 0xe1, 0x94, 0xeb, 0x65, 0xad, 0xee, 0xd3, 0x8c, 0xd0, 0x47, 0xb8, 0xf3, + 0x70, 0xf1, 0xca, 0xce, 0x84, 0xf3, 0x95, 0x9d, 0x49, 0xfb, 0xed, 0x59, 0xe1, 0xfd, 0xb6, 0x45, + 0x6b, 0x6b, 0x9c, 0x3a, 0xfa, 0x34, 0x8c, 0xe9, 0x1f, 0x26, 0xb8, 0x8e, 0x0b, 0xd9, 0x8c, 0xac, + 0x76, 0x3e, 0x70, 0x3e, 0x5f, 0x9d, 0x01, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x23, 0xc3, 0xcd, 0xfe, + 0x52, 0x7f, 0x5c, 0x4d, 0xff, 0x46, 0x68, 0x04, 0xb2, 0x97, 0x3d, 0xba, 0x0e, 0x23, 0x8d, 0x96, + 0x4b, 0xbc, 0x68, 0xb5, 0x56, 0x14, 0x4b, 0x6f, 0x49, 0xe0, 0x88, 0x7d, 0x24, 0xb2, 0x26, 0xf0, + 0x32, 0xac, 0x28, 0xd8, 0xbf, 0x5a, 0x82, 0xb9, 0x1e, 0x29, 0x38, 0x12, 0x2a, 0x29, 0xab, 0x2f, + 0x95, 0xd4, 0x82, 0xcc, 0x3a, 0xbf, 0x9e, 0x90, 0x76, 0x25, 0x32, 0xca, 0xc7, 0x32, 0xaf, 0x24, + 0x7e, 0xdf, 0x2e, 0x02, 0xba, 0x56, 0x6b, 0xa0, 0xa7, 0x93, 0x8b, 0xa1, 0xcd, 0x1e, 0xec, 0xff, + 0x09, 0x9c, 0xab, 0x99, 0xb4, 0xbf, 0x5a, 0x82, 0x53, 0x6a, 0x08, 0xbf, 0x75, 0x07, 0xee, 0x66, + 0x7a, 0xe0, 0x8e, 0x40, 0xaf, 0x6b, 0xdf, 0x80, 0x21, 0x1e, 0x1c, 0xb0, 0x0f, 0xd6, 0xfb, 0x09, + 0x33, 0xf8, 0xae, 0xe2, 0xf6, 0x8c, 0x00, 0xbc, 0xdf, 0x6f, 0xc1, 0x64, 0xc2, 0xd7, 0x0c, 0x61, + 0xcd, 0x21, 0xf9, 0x41, 0xd8, 0xe3, 0x2c, 0xc6, 0xfb, 0x3c, 0x0c, 0xec, 0xf8, 0x61, 0x94, 0x34, + 0xfa, 0xb8, 0xea, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x77, 0x2d, 0x18, 0xdc, 0x70, 0x5c, 0x2f, 0x92, + 0x0a, 0x02, 0x2b, 0x47, 0x41, 0xd0, 0xcf, 0x77, 0xa1, 0x17, 0x61, 0x88, 0x6c, 0x6d, 0x91, 0x46, + 0x24, 0x66, 0x55, 0x46, 0x73, 0x18, 0x5a, 0x66, 0xa5, 0x94, 0x17, 0x64, 0x8d, 0xf1, 0xbf, 0x58, + 0x20, 0xa3, 0xdb, 0x50, 0x89, 0xdc, 0x36, 0x59, 0x68, 0x36, 0x85, 0xda, 0xfc, 0x01, 0x22, 0x52, + 0x6c, 0x48, 0x02, 0x38, 0xa6, 0x65, 0x7f, 0xa1, 0x04, 0x10, 0x47, 0x55, 0xea, 0xf5, 0x89, 0x8b, + 0x29, 0x85, 0xea, 0x85, 0x0c, 0x85, 0x2a, 0x8a, 0x09, 0x66, 0x68, 0x53, 0xd5, 0x30, 0x95, 0xfb, + 0x1a, 0xa6, 0x81, 0xc3, 0x0c, 0xd3, 0x12, 0x4c, 0xc7, 0x51, 0xa1, 0xcc, 0xa0, 0x78, 0xec, 0xfa, + 0xdc, 0x48, 0x02, 0x71, 0x1a, 0xdf, 0x26, 0x70, 0x5e, 0x05, 0xc7, 0x11, 0x37, 0x1a, 0xb3, 0xca, + 0xd6, 0x15, 0xd4, 0x3d, 0xc6, 0x29, 0xd6, 0x18, 0x97, 0x72, 0x35, 0xc6, 0x3f, 0x69, 0xc1, 0xc9, + 0x64, 0x3b, 0xcc, 0x85, 0xf9, 0xf3, 0x16, 0x9c, 0x62, 0x7a, 0x73, 0xd6, 0x6a, 0x5a, 0x4b, 0xff, + 0x42, 0x61, 0xc0, 0x9f, 0x9c, 0x1e, 0xc7, 0x61, 0x43, 0xd6, 0xb2, 0x48, 0xe3, 0xec, 0x16, 0xed, + 0xff, 0x50, 0x82, 0x99, 0xbc, 0x48, 0x41, 0xcc, 0x69, 0xc3, 0xb9, 0x5b, 0xdf, 0x25, 0x77, 0x84, + 0x69, 0x7c, 0xec, 0xb4, 0xc1, 0x8b, 0xb1, 0x84, 0x27, 0xb3, 0x2a, 0x94, 0xfa, 0xcc, 0xaa, 0xb0, + 0x03, 0xd3, 0x77, 0x76, 0x88, 0x77, 0xd3, 0x0b, 0x9d, 0xc8, 0x0d, 0xb7, 0x5c, 0xa6, 0x63, 0xe6, + 0xeb, 0x46, 0xa6, 
0x62, 0x9d, 0xbe, 0x9d, 0x44, 0xb8, 0x7f, 0x30, 0x77, 0xd6, 0x28, 0x88, 0xbb, + 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0x4e, 0x4a, 0x31, 0xf0, 0x10, 0x93, 0x52, 0xd8, 0x9f, 0xb7, + 0xe0, 0x4c, 0x6e, 0x5e, 0x62, 0x74, 0x11, 0x46, 0x9c, 0x8e, 0xcb, 0xc5, 0xf4, 0xe2, 0x18, 0x65, + 0xe2, 0xa0, 0xda, 0x2a, 0x17, 0xd2, 0x2b, 0x28, 0x3d, 0xbd, 0x76, 0x5d, 0xaf, 0x99, 0x3c, 0xbd, + 0xae, 0xb9, 0x5e, 0x13, 0x33, 0x88, 0x3a, 0x8e, 0xcb, 0x79, 0xc7, 0xb1, 0xfd, 0x7d, 0x16, 0x08, + 0x87, 0xd3, 0x3e, 0xce, 0xee, 0x4f, 0xc0, 0xd8, 0x5e, 0x3a, 0x71, 0xd5, 0xf9, 0x7c, 0x0f, 0x5c, + 0x91, 0xae, 0x4a, 0x31, 0x64, 0x46, 0x92, 0x2a, 0x83, 0x96, 0xdd, 0x04, 0x01, 0xad, 0x12, 0x26, + 0x84, 0xee, 0xdd, 0x9b, 0xe7, 0x00, 0x9a, 0x0c, 0x97, 0x65, 0xb3, 0x2c, 0x99, 0x37, 0x73, 0x55, + 0x41, 0xb0, 0x86, 0x65, 0xff, 0xbb, 0x12, 0x8c, 0xca, 0x44, 0x49, 0x5d, 0xaf, 0x1f, 0x51, 0xd1, + 0xa1, 0x32, 0xa7, 0xa2, 0x4b, 0x50, 0x61, 0xb2, 0xcc, 0x5a, 0x2c, 0x61, 0x53, 0x92, 0x84, 0x35, + 0x09, 0xc0, 0x31, 0x0e, 0xdd, 0x45, 0x61, 0x77, 0x93, 0xa1, 0x27, 0xdc, 0x23, 0xeb, 0xbc, 0x18, + 0x4b, 0x38, 0xfa, 0x18, 0x4c, 0xf1, 0x7a, 0x81, 0xdf, 0x71, 0xb6, 0xb9, 0xfe, 0x63, 0x50, 0xc5, + 0x9c, 0x98, 0x5a, 0x4b, 0xc0, 0xee, 0x1f, 0xcc, 0x9d, 0x4c, 0x96, 0x31, 0xc5, 0x5e, 0x8a, 0x0a, + 0x33, 0x73, 0xe2, 0x8d, 0xd0, 0xdd, 0x9f, 0xb2, 0x8e, 0x8a, 0x41, 0x58, 0xc7, 0xb3, 0x3f, 0x0d, + 0x28, 0x9d, 0x32, 0x0a, 0xbd, 0xc6, 0x6d, 0x5b, 0xdd, 0x80, 0x34, 0x8b, 0x14, 0x7d, 0x7a, 0x64, + 0x05, 0xe9, 0xd9, 0xc4, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0x8b, 0x65, 0x98, 0x4a, 0xfa, 0x72, 0xa3, + 0xab, 0x30, 0xc4, 0x59, 0x0f, 0x41, 0xbe, 0xc0, 0x8e, 0x44, 0xf3, 0x00, 0x67, 0x87, 0xb0, 0xe0, + 0x5e, 0x44, 0x7d, 0xf4, 0x06, 0x8c, 0x36, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0x68, 0x2e, 0xd4, 0x56, + 0xc5, 0x72, 0xce, 0x7c, 0xd8, 0x56, 0x63, 0x34, 0xdd, 0xab, 0x9c, 0xe9, 0x4c, 0x63, 0x10, 0xd6, + 0xc9, 0xa1, 0x0d, 0x16, 0x67, 0x7e, 0xcb, 0xdd, 0x5e, 0x73, 0x3a, 0x45, 0x8e, 0x0e, 0x4b, 0x12, + 0x49, 0xa3, 0x3c, 0x2e, 0x82, 0xd1, 0x73, 0x00, 0x8e, 0x09, 0xa1, 0xcf, 0xc2, 0x89, 0x30, 0x47, + 0xdc, 0x9e, 0x97, 0x41, 0xb0, 0x48, 0x02, 0xbd, 0xf8, 0xc8, 0xbd, 0x83, 0xb9, 0x13, 0x59, 0x82, + 0xf9, 0xac, 0x66, 0xec, 0x2f, 0x9e, 0x04, 0x63, 0x13, 0x1b, 0x09, 0x65, 0xad, 0x23, 0x4a, 0x28, + 0x8b, 0x61, 0x84, 0xb4, 0x3b, 0xd1, 0x7e, 0xd5, 0x0d, 0x8a, 0xd2, 0xea, 0x2f, 0x0b, 0x9c, 0x34, + 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0xd6, 0xdf, 0xf2, 0x37, 0x30, 0xeb, 0xef, 0xc0, 0x31, 0x66, + 0xfd, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xe3, 0x0b, 0xa6, 0x3f, 0x73, 0x1d, 0x5e, 0xe1, + 0x28, 0xe9, 0xfc, 0x92, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x9a, 0xda, 0x81, 0x43, 0xf9, 0x0f, 0xf3, + 0xb4, 0xc1, 0x43, 0xe6, 0x1e, 0x14, 0xb9, 0x7d, 0x87, 0x1f, 0x34, 0xb7, 0xef, 0x8a, 0xcc, 0xc8, + 0x3b, 0x92, 0xef, 0x95, 0xc4, 0x12, 0xee, 0xf6, 0xc8, 0xc3, 0x7b, 0x4b, 0xcf, 0x62, 0x5c, 0xc9, + 0x3f, 0x09, 0x54, 0x82, 0xe2, 0x3e, 0x73, 0x17, 0x7f, 0x9f, 0x05, 0xa7, 0x3a, 0x59, 0x09, 0xbd, + 0x85, 0x6d, 0xc0, 0x8b, 0x7d, 0xe7, 0x0c, 0x37, 0x1a, 0x64, 0x32, 0xb5, 0xec, 0xac, 0xf0, 0xd9, + 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9b, 0x42, 0x47, 0xfd, 0x44, 0x4e, 0x12, 0xe4, 0x82, 0xd4, 0xc7, + 0x1b, 0x19, 0x09, 0x77, 0xdf, 0x9b, 0x97, 0x70, 0xb7, 0xef, 0x34, 0xbb, 0xaf, 0xa9, 0xf4, 0xc7, + 0xe3, 0xf9, 0x4b, 0x89, 0x27, 0x37, 0xee, 0x99, 0xf4, 0xf8, 0x35, 0x95, 0xf4, 0xb8, 0x20, 0x1e, + 0x30, 0x4f, 0x69, 0xdc, 0x33, 0xd5, 0xb1, 0x96, 0xae, 0x78, 0xf2, 0x68, 0xd2, 0x15, 0x1b, 0x57, + 0x0d, 0xcf, 0x98, 0xfb, 0x74, 0x8f, 0xab, 0xc6, 0xa0, 0x5b, 0x7c, 0xd9, 0xf0, 0xd4, 0xcc, 0xd3, + 0x0f, 0x94, 0x9a, 0xf9, 0x96, 0x9e, 0xea, 
0x18, 0xf5, 0xc8, 0xe5, 0x4b, 0x91, 0xfa, 0x4c, 0x70, + 0x7c, 0x4b, 0xbf, 0x00, 0x4f, 0xe4, 0xd3, 0x55, 0xf7, 0x5c, 0x9a, 0x6e, 0xe6, 0x15, 0x98, 0x4a, + 0x9c, 0x7c, 0xf2, 0x78, 0x12, 0x27, 0x9f, 0x3a, 0xf2, 0xc4, 0xc9, 0xa7, 0x8f, 0x21, 0x71, 0xf2, + 0x23, 0xc7, 0x98, 0x38, 0xf9, 0x16, 0x33, 0xa8, 0xe1, 0x61, 0x7b, 0x44, 0xfc, 0xe2, 0xa7, 0x72, + 0xa2, 0x5e, 0xa5, 0x63, 0xfb, 0xf0, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0xca, 0x48, 0xc8, 0x3c, 0xf3, + 0x10, 0x12, 0x32, 0xaf, 0xc7, 0x09, 0x99, 0xcf, 0xe4, 0x4f, 0x75, 0x86, 0x0b, 0x46, 0x4e, 0x1a, + 0xe6, 0x5b, 0x7a, 0xfa, 0xe4, 0x47, 0x0b, 0xb4, 0x26, 0x59, 0x82, 0xc7, 0x82, 0xa4, 0xc9, 0xaf, + 0xf2, 0xa4, 0xc9, 0x8f, 0xe5, 0x9f, 0xe4, 0xc9, 0xeb, 0xce, 0x48, 0x95, 0x4c, 0xfb, 0xa5, 0xc2, + 0x5e, 0xb2, 0x48, 0xcd, 0x39, 0xfd, 0x52, 0x71, 0x33, 0xd3, 0xfd, 0x52, 0x20, 0x1c, 0x93, 0xb2, + 0x7f, 0xa0, 0x04, 0xe7, 0x8a, 0xf7, 0x5b, 0x2c, 0x4d, 0xad, 0xc5, 0x4a, 0xe4, 0x84, 0x34, 0x95, + 0xbf, 0xd9, 0x62, 0xac, 0xbe, 0xa3, 0xf8, 0x5d, 0x81, 0x69, 0xe5, 0xbb, 0xd1, 0x72, 0x1b, 0xfb, + 0xeb, 0xf1, 0xcb, 0x57, 0xf9, 0xbb, 0xd7, 0x93, 0x08, 0x38, 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, + 0xb8, 0x5a, 0x15, 0x6f, 0x33, 0x25, 0xbe, 0xad, 0x9b, 0x60, 0x9c, 0xc4, 0xb7, 0xbf, 0x64, 0xc1, + 0x23, 0x39, 0x19, 0x07, 0xfb, 0x0e, 0x52, 0xb7, 0x05, 0x93, 0x1d, 0xb3, 0x6a, 0x8f, 0xb8, 0x9a, + 0x46, 0x5e, 0x43, 0xd5, 0xd7, 0x04, 0x00, 0x27, 0x89, 0xda, 0x3f, 0x5d, 0x82, 0xb3, 0x85, 0xc6, + 0x88, 0x08, 0xc3, 0xe9, 0xed, 0x76, 0xe8, 0x2c, 0x05, 0xa4, 0x49, 0xbc, 0xc8, 0x75, 0x5a, 0xf5, + 0x0e, 0x69, 0x68, 0xf2, 0x70, 0x66, 0xd5, 0x77, 0x65, 0xad, 0xbe, 0x90, 0xc6, 0xc0, 0x39, 0x35, + 0xd1, 0x0a, 0xa0, 0x34, 0x44, 0xcc, 0x30, 0x8b, 0xf9, 0x9d, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x0f, + 0xc1, 0xb8, 0x32, 0x72, 0xd4, 0x66, 0x9c, 0x1d, 0xec, 0x58, 0x07, 0x60, 0x13, 0x0f, 0x5d, 0xe6, + 0x41, 0xe3, 0x45, 0x7a, 0x01, 0x21, 0x3c, 0x9f, 0x94, 0x11, 0xe1, 0x45, 0x31, 0xd6, 0x71, 0x16, + 0x2f, 0xfe, 0xda, 0xef, 0x9f, 0x7b, 0xcf, 0x6f, 0xfe, 0xfe, 0xb9, 0xf7, 0xfc, 0xce, 0xef, 0x9f, + 0x7b, 0xcf, 0x77, 0xdd, 0x3b, 0x67, 0xfd, 0xda, 0xbd, 0x73, 0xd6, 0x6f, 0xde, 0x3b, 0x67, 0xfd, + 0xce, 0xbd, 0x73, 0xd6, 0xef, 0xdd, 0x3b, 0x67, 0x7d, 0xe1, 0x0f, 0xce, 0xbd, 0xe7, 0x13, 0xa5, + 0xbd, 0xcb, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xc9, 0x26, 0x9f, 0xc5, 0x07, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -10507,6 +10605,39 @@ func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *GRPCAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GRPCAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GRPCAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Service != nil { + i -= len(*m.Service) + copy(dAtA[i:], *m.Service) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Service))) + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10729,65 +10860,6 @@ func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Handler) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TCPSocket != nil { - { - size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.HTTPGet != nil { - { - size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Exec != nil { - { - size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11141,6 +11213,65 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LifecycleHandler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LifecycleHandler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13251,6 +13382,42 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.ResizeStatus != nil { + i -= len(*m.ResizeStatus) + copy(dAtA[i:], *m.ResizeStatus) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResizeStatus))) + i-- + dAtA[i] = 0x32 + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= 
len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -14590,6 +14757,34 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodOS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodOS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodOS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14843,6 +15038,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OS != nil { + { + size, err := m.OS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } if m.SetHostnameAsFQDN != nil { i-- if *m.SetHostnameAsFQDN { @@ -15794,7 +16003,7 @@ func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 { - size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ProbeHandler.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -15806,6 +16015,77 @@ func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProbeHandler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProbeHandler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProbeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.GRPC != nil { + { + size, err := m.GRPC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20762,6 +21042,20 @@ func (m *GCEPersistentDiskVolumeSource) Size() (n int) { 
return n } +func (m *GRPCAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + if m.Service != nil { + l = len(*m.Service) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *GitRepoVolumeSource) Size() (n int) { if m == nil { return 0 @@ -20845,27 +21139,6 @@ func (m *HTTPHeader) Size() (n int) { return n } -func (m *Handler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *HostAlias) Size() (n int) { if m == nil { return 0 @@ -21001,6 +21274,27 @@ func (m *Lifecycle) Size() (n int) { return n } +func (m *LifecycleHandler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *LimitRange) Size() (n int) { if m == nil { return 0 @@ -21792,6 +22086,19 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ResizeStatus != nil { + l = len(*m.ResizeStatus) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22247,6 +22554,17 @@ func (m *PodLogOptions) Size() (n int) { return n } +func (m *PodOS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodPortForwardOptions) Size() (n int) { if m == nil { return 0 @@ -22483,6 +22801,10 @@ func (m *PodSpec) Size() (n int) { if m.SetHostnameAsFQDN != nil { n += 3 } + if m.OS != nil { + l = m.OS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -22677,7 +22999,7 @@ func (m *Probe) Size() (n int) { } var l int _ = l - l = m.Handler.Size() + l = m.ProbeHandler.Size() n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) @@ -22690,6 +23012,31 @@ func (m *Probe) Size() (n int) { return n } +func (m *ProbeHandler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GRPC != nil { + l = m.GRPC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ProjectedVolumeSource) Size() (n int) { if m == nil { return 0 @@ -24993,6 +25340,17 @@ func (this *GCEPersistentDiskVolumeSource) String() string { }, "") return s } +func (this *GRPCAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GRPCAction{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Service:` + valueToStringGenerated(this.Service) + `,`, + `}`, + }, 
"") + return s +} func (this *GitRepoVolumeSource) String() string { if this == nil { return "nil" @@ -25060,18 +25418,6 @@ func (this *HTTPHeader) String() string { }, "") return s } -func (this *Handler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s -} func (this *HostAlias) String() string { if this == nil { return "nil" @@ -25151,8 +25497,20 @@ func (this *Lifecycle) String() string { return "nil" } s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHandler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LifecycleHandler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, `}`, }, "") return s @@ -25812,11 +26170,23 @@ func (this *PersistentVolumeClaimStatus) String() string { mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) } mapStringForCapacity += "}" + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Capacity:` + mapStringForCapacity + `,`, `Conditions:` + repeatedStringForConditions + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `ResizeStatus:` + valueToStringGenerated(this.ResizeStatus) + `,`, `}`, }, "") return s @@ -26124,6 +26494,16 @@ func (this *PodLogOptions) String() string { }, "") return s } +func (this *PodOS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodOS{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *PodPortForwardOptions) String() string { if this == nil { return "nil" @@ -26293,6 +26673,7 @@ func (this *PodSpec) String() string { `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, `SetHostnameAsFQDN:` + valueToStringGenerated(this.SetHostnameAsFQDN) + `,`, + `OS:` + strings.Replace(this.OS.String(), "PodOS", "PodOS", 1) + `,`, `}`, }, 
"") return s @@ -26456,7 +26837,7 @@ func (this *Probe) String() string { return "nil" } s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `ProbeHandler:` + strings.Replace(strings.Replace(this.ProbeHandler.String(), "ProbeHandler", "ProbeHandler", 1), `&`, ``, 1) + `,`, `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, @@ -26467,6 +26848,19 @@ func (this *Probe) String() string { }, "") return s } +func (this *ProbeHandler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProbeHandler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `GRPC:` + strings.Replace(this.GRPC.String(), "GRPCAction", "GRPCAction", 1) + `,`, + `}`, + }, "") + return s +} func (this *ProjectedVolumeSource) String() string { if this == nil { return "nil" @@ -38625,6 +39019,108 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *GRPCAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GRPCAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GRPCAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Service = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 @@ -39399,164 +39895,6 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } return nil } -func (m *Handler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Handler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exec == nil { - m.Exec = &ExecAction{} - } - if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTPGet == nil { - m.HTTPGet = &HTTPGetAction{} - } - if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TCPSocket == nil { - m.TCPSocket = &TCPSocketAction{} - } - if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *HostAlias) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -40695,7 +41033,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.PostStart == nil { - m.PostStart = 
&Handler{} + m.PostStart = &LifecycleHandler{} } if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40731,7 +41069,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.PreStop == nil { - m.PreStop = &Handler{} + m.PreStop = &LifecycleHandler{} } if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40758,6 +41096,164 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } return nil } +func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LifecycleHandler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LifecycleHandler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LimitRange) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47741,6 +48237,168 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizeStatus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeClaimResizeStatus(dAtA[iNdEx:postIndex]) + m.ResizeStatus = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51535,6 +52193,88 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodOS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodOS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodOS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = OSName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -53550,6 +54290,42 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.SetHostnameAsFQDN = &b + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OS == nil { + m.OS = &PodOS{} + } + if err := m.OS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55180,7 +55956,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProbeHandler", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -55207,7 +55983,7 
@@ func (m *Probe) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProbeHandler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -55347,6 +56123,200 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProbeHandler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProbeHandler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProbeHandler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GRPC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GRPC == nil { + m.GRPC = &GRPCAction{} + } + if err := m.GRPC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index ff63fd29f..b5b44781f 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -908,15 +908,11 @@ message ContainerStatus { // Specifies whether the container has passed its readiness probe. optional bool ready = 4; - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. + // The number of times the container has been restarted. optional int32 restartCount = 5; // The image the container is running. - // More info: https://kubernetes.io/docs/concepts/containers/images - // TODO(dchen1107): Which image the container is running with? + // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; // ImageID of the container's image. @@ -1190,15 +1186,16 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } -// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// An EphemeralContainer is a temporary container that you may add to an existing Pod for // user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. +// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +// Pod to exceed its resource allocation. +// +// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +// Pod. Ephemeral containers may not be removed or restarted. +// +// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. message EphemeralContainer { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. 
Fields in common with Container are in the @@ -1208,8 +1205,10 @@ message EphemeralContainer { // If set, the name of the container from PodSpec that this ephemeral container targets. // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. + // If not set then the ephemeral container uses the namespaces configured in the Pod spec. + // + // The container runtime must implement support for this feature. If the runtime does not + // support namespace targeting then the result of setting this field is undefined. // +optional optional string targetContainerName = 2; } @@ -1257,6 +1256,12 @@ message EphemeralContainerCommon { optional string workingDir = 5; // Ports are not allowed for ephemeral containers. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -1280,7 +1285,7 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; - // Pod volumes to mount into the container's filesystem. + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath @@ -1641,6 +1646,19 @@ message GCEPersistentDiskVolumeSource { optional bool readOnly = 4; } +message GRPCAction { + // Port number of the gRPC service. Number must be in the range 1 to 65535. + optional int32 port = 1; + + // Service is the name of the service to place in the gRPC HealthCheckRequest + // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + // + // If this is not specified, the default behavior is defined by gRPC. + // +optional + // +default="" + optional string service = 2; +} + // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. @@ -1741,25 +1759,6 @@ message HTTPHeader { optional string value = 2; } -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - // +optional - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - // +optional - optional TCPSocketAction tcpSocket = 3; -} - // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. message HostAlias { @@ -1927,20 +1926,37 @@ message Lifecycle { // Other management of the container blocks until the hook completes. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - optional Handler postStart = 1; + optional LifecycleHandler postStart = 1; // PreStop is called immediately before a container is terminated due to an // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. 
The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container crashes or exits. The Pod's termination grace period countdown begins before the + // PreStop hook is executed. Regardless of the outcome of the handler, the // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes + // period (unless delayed by finalizers). Other management of the container blocks until the hook completes // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - optional Handler preStop = 2; + optional LifecycleHandler preStop = 2; +} + +// LifecycleHandler defines a specific action that should be taken in a lifecycle +// hook. One and only one of the fields, except TCPSocket must be specified. +message LifecycleHandler { + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + // for the backward compatibility. There are no validation of this field and + // lifecycle hooks will fail in runtime when tcp handler is specified. + // +optional + optional TCPSocketAction tcpSocket = 3; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -2059,7 +2075,7 @@ message LocalVolumeSource { // Filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. // +optional optional string fsType = 2; } @@ -2662,6 +2678,9 @@ message PersistentVolumeClaimSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // Resources represents the minimum resources the volume should have. + // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + // that are lower than previous value but must still be higher than capacity recorded in the + // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional optional ResourceRequirements resources = 2; @@ -2732,6 +2751,26 @@ message PersistentVolumeClaimStatus { // +patchMergeKey=type // +patchStrategy=merge repeated PersistentVolumeClaimCondition conditions = 4; + + // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may + // be larger than the actual capacity when a volume expansion operation is requested. + // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. + // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. + // If a volume expansion capacity request is lowered, allocatedResources is only + // lowered if there are no expansion operations in progress and if the actual volume capacity + // is equal or lower than the requested capacity. 
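A minimal sketch of a preStop hook built with the renamed LifecycleHandler type above; the sleep command is a placeholder, and exec is used because tcpSocket lifecycle handlers are deprecated and not validated:

package main

import corev1 "k8s.io/api/core/v1"

// newDrainLifecycle returns a Lifecycle whose preStop hook runs a short shell
// command before the container is terminated. Exec is chosen here because the
// tcpSocket field is kept only for backward compatibility.
func newDrainLifecycle() *corev1.Lifecycle {
	return &corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			Exec: &corev1.ExecAction{
				Command: []string{"/bin/sh", "-c", "sleep 5"},
			},
		},
	}
}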
+ // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + map allocatedResources = 5; + + // ResizeStatus stores status of resize operation. + // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty + // string by resize controller or kubelet. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + optional string resizeStatus = 6; } // PersistentVolumeClaimTemplate is used to produce @@ -3176,12 +3215,10 @@ message PodExecOptions { optional bool stdin = 1; // Redirect the standard output stream of the pod for this call. - // Defaults to true. // +optional optional bool stdout = 2; // Redirect the standard error stream of the pod for this call. - // Defaults to true. // +optional optional bool stderr = 3; @@ -3273,6 +3310,15 @@ message PodLogOptions { optional bool insecureSkipTLSVerifyBackend = 9; } +// PodOS defines the OS parameters of a pod. +message PodOS { + // Name is the name of the operating system. The currently supported values are linux and windows. + // Additional value may be defined in future and can be one of: + // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + // Clients should expect to handle additional values and treat unrecognized values in this field as os: null + optional string name = 1; +} + // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. // The `port` query parameter must specify the port or @@ -3308,12 +3354,14 @@ message PodSecurityContext { // container. May also be set in SecurityContext. If set in // both SecurityContext and PodSecurityContext, the value specified in SecurityContext // takes precedence for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SELinuxOptions seLinuxOptions = 1; // The Windows specific settings applied to all containers. // If unspecified, the options within a container's SecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional optional WindowsSecurityContextOptions windowsOptions = 8; @@ -3322,6 +3370,7 @@ message PodSecurityContext { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsUser = 2; @@ -3330,6 +3379,7 @@ message PodSecurityContext { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsGroup = 6; @@ -3345,6 +3395,7 @@ message PodSecurityContext { // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. + // Note that this field cannot be set when spec.os.name is windows. // +optional repeated int64 supplementalGroups = 4; @@ -3357,11 +3408,13 @@ message PodSecurityContext { // 3. 
The permission bits are OR'd with rw-rw---- // // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 fsGroup = 5; // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported // sysctls (by the container runtime) might fail to launch. + // Note that this field cannot be set when spec.os.name is windows. // +optional repeated Sysctl sysctls = 7; @@ -3371,10 +3424,12 @@ message PodSecurityContext { // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional string fsGroupChangePolicy = 9; // The seccomp options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 10; } @@ -3425,7 +3480,7 @@ message PodSpec { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge @@ -3645,6 +3700,37 @@ message PodSpec { // Default to false. // +optional optional bool setHostnameAsFQDN = 35; + + // Specifies the OS of the containers in the pod. + // Some pod and container fields are restricted if this is set. + // + // If the OS field is set to linux, the following fields must be unset: + // -securityContext.windowsOptions + // + // If the OS field is set to windows, following fields must be unset: + // - spec.hostPID + // - spec.hostIPC + // - spec.securityContext.seLinuxOptions + // - spec.securityContext.seccompProfile + // - spec.securityContext.fsGroup + // - spec.securityContext.fsGroupChangePolicy + // - spec.securityContext.sysctls + // - spec.shareProcessNamespace + // - spec.securityContext.runAsUser + // - spec.securityContext.runAsGroup + // - spec.securityContext.supplementalGroups + // - spec.containers[*].securityContext.seLinuxOptions + // - spec.containers[*].securityContext.seccompProfile + // - spec.containers[*].securityContext.capabilities + // - spec.containers[*].securityContext.readOnlyRootFilesystem + // - spec.containers[*].securityContext.privileged + // - spec.containers[*].securityContext.allowPrivilegeEscalation + // - spec.containers[*].securityContext.procMount + // - spec.containers[*].securityContext.runAsUser + // - spec.containers[*].securityContext.runAsGroup + // +optional + // This is an alpha field and requires the IdentifyPodOS feature + optional PodOS os = 36; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3739,7 +3825,7 @@ message PodStatus { optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. 
+ // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; } @@ -3876,7 +3962,7 @@ message PreferredSchedulingTerm { // alive or ready to receive traffic. message Probe { // The action taken to determine the health of a container - optional Handler handler = 1; + optional ProbeHandler handler = 1; // Number of seconds after the container has started before liveness probes are initiated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes @@ -3918,6 +4004,28 @@ message Probe { optional int64 terminationGracePeriodSeconds = 7; } +// ProbeHandler defines a specific action that should be taken in a probe. +// One and only one of the fields must be specified. +message ProbeHandler { + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. + // +optional + optional TCPSocketAction tcpSocket = 3; + + // GRPC specifies an action involving a GRPC port. + // This is an alpha field and requires enabling GRPCContainerProbe feature gate. + // +featureGate=GRPCContainerProbe + // +optional + optional GRPCAction grpc = 4; +} + // Represents a projected volume source message ProjectedVolumeSource { // list of volume projections @@ -4479,6 +4587,7 @@ message Secret { map stringData = 4; // Used to facilitate programmatic handling of secret data. + // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types // +optional optional string type = 3; } @@ -4602,12 +4711,14 @@ message SecretVolumeSource { message SecurityContext { // The capabilities to add/drop when running containers. // Defaults to the default set of capabilities granted by the container runtime. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional Capabilities capabilities = 1; // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool privileged = 2; @@ -4615,12 +4726,14 @@ message SecurityContext { // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SELinuxOptions seLinuxOptions = 3; // The Windows specific settings applied to all containers. // If unspecified, the options from the PodSecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional optional WindowsSecurityContextOptions windowsOptions = 10; @@ -4628,6 +4741,7 @@ message SecurityContext { // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. 
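The ProbeHandler message above adds a gRPC variant alongside exec, httpGet, and tcpSocket. A minimal sketch of wiring it into a readiness probe, assuming a hypothetical health service listening on port 9090 and a cluster with the GRPCContainerProbe feature gate enabled:

package main

import corev1 "k8s.io/api/core/v1"

// grpcReadinessProbe builds a Probe that uses the new GRPC handler. Service
// selects the name placed in the gRPC HealthCheckRequest; leaving it nil
// falls back to gRPC's default behavior.
func grpcReadinessProbe() *corev1.Probe {
	service := "ready" // hypothetical service name
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			GRPC: &corev1.GRPCAction{
				Port:    9090,
				Service: &service,
			},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
	}
}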
// +optional optional int64 runAsUser = 4; @@ -4635,6 +4749,7 @@ message SecurityContext { // Uses runtime default if unset. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional int64 runAsGroup = 8; @@ -4649,6 +4764,7 @@ message SecurityContext { // Whether this container has a read-only root filesystem. // Default is false. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool readOnlyRootFilesystem = 6; @@ -4658,6 +4774,7 @@ message SecurityContext { // AllowPrivilegeEscalation is true always when the container is: // 1) run as Privileged // 2) has CAP_SYS_ADMIN + // Note that this field cannot be set when spec.os.name is windows. // +optional optional bool allowPrivilegeEscalation = 7; @@ -4665,12 +4782,14 @@ message SecurityContext { // The default is DefaultProcMount which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional string procMount = 9; // The seccomp options to use by this container. If seccomp options are // provided at both the pod & container level, the container options // override the pod options. + // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 11; } @@ -4904,12 +5023,9 @@ message ServiceSpec { // clients must ensure that clusterIPs[0] and clusterIP have the same // value. // - // Unless the "IPv6DualStack" feature gate is enabled, this field is - // limited to one value, which must be the same as the clusterIP field. If - // the feature gate is enabled, this field may hold a maximum of two - // entries (dual-stack IPs, in either order). These IPs must correspond to - // the values of the ipFamilies field. Both clusterIPs and ipFamilies are - // governed by the ipFamilyPolicy field. + // This field may hold a maximum of two entries (dual-stack IPs, in either order). + // These IPs must correspond to the values of the ipFamilies field. Both + // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +listType=atomic // +optional @@ -5009,17 +5125,16 @@ message ServiceSpec { optional SessionAffinityConfig sessionAffinityConfig = 14; // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - // service, and is gated by the "IPv6DualStack" feature gate. This field - // is usually assigned automatically based on cluster configuration and the - // ipFamilyPolicy field. If this field is specified manually, the requested - // family is available in the cluster, and ipFamilyPolicy allows it, it - // will be used; otherwise creation of the service will fail. This field - // is conditionally mutable: it allows for adding or removing a secondary - // IP family, but it does not allow changing the primary IP family of the - // Service. Valid values are "IPv4" and "IPv6". This field only applies - // to Services of types ClusterIP, NodePort, and LoadBalancer, and does - // apply to "headless" services. This field will be wiped when updating a - // Service to type ExternalName. + // service. 
This field is usually assigned automatically based on cluster + // configuration and the ipFamilyPolicy field. If this field is specified + // manually, the requested family is available in the cluster, + // and ipFamilyPolicy allows it, it will be used; otherwise creation of + // the service will fail. This field is conditionally mutable: it allows + // for adding or removing a secondary IP family, but it does not allow + // changing the primary IP family of the Service. Valid values are "IPv4" + // and "IPv6". This field only applies to Services of types ClusterIP, + // NodePort, and LoadBalancer, and does apply to "headless" services. + // This field will be wiped when updating a Service to type ExternalName. // // This field may hold a maximum of two entries (dual-stack families, in // either order). These families must correspond to the values of the @@ -5030,14 +5145,13 @@ message ServiceSpec { repeated string ipFamilies = 19; // IPFamilyPolicy represents the dual-stack-ness requested or required by - // this Service, and is gated by the "IPv6DualStack" feature gate. If - // there is no value provided, then this field will be set to SingleStack. - // Services can be "SingleStack" (a single IP family), "PreferDualStack" - // (two IP families on dual-stack configured clusters or a single IP family - // on single-stack clusters), or "RequireDualStack" (two IP families on - // dual-stack configured clusters, otherwise fail). The ipFamilies and - // clusterIPs fields depend on the value of this field. This field will be - // wiped when updating a service to type ExternalName. + // this Service. If there is no value provided, then this field will be set + // to SingleStack. Services can be "SingleStack" (a single IP family), + // "PreferDualStack" (two IP families on dual-stack configured clusters or + // a single IP family on single-stack clusters), or "RequireDualStack" + // (two IP families on dual-stack configured clusters, otherwise fail). The + // ipFamilies and clusterIPs fields depend on the value of this field. This + // field will be wiped when updating a service to type ExternalName. // +optional optional string ipFamilyPolicy = 17; @@ -5298,7 +5412,7 @@ message TopologySpreadConstraint { // but giving higher precedence to topologies that would help reduce the // skew. // A constraint is considered "Unsatisfiable" for an incoming pod - // if and only if every possible node assigment for that pod would violate + // if and only if every possible node assignment for that pod would violate // "MaxSkew" on some topology. // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same // labelSelector spread as 3/1/1: @@ -5582,9 +5696,6 @@ message VolumeSource { // A pod can use both types of ephemeral volumes and // persistent volumes at the same time. // - // This is a beta feature and only available when the GenericEphemeralVolume - // feature gate is enabled. - // // +optional optional EphemeralVolumeSource ephemeral = 29; } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 027d73edd..80c5dd744 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -179,9 +179,6 @@ type VolumeSource struct { // A pod can use both types of ephemeral volumes and // persistent volumes at the same time. // - // This is a beta feature and only available when the GenericEphemeralVolume - // feature gate is enabled. 
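The reworded ipFamilies and ipFamilyPolicy documentation above corresponds to a spec like the following minimal sketch; the IPFamilyPolicyPreferDualStack, IPv4Protocol, and IPv6Protocol constant names are assumed from k8s.io/api/core/v1, and two families are only assigned on dual-stack-configured clusters:

package main

import corev1 "k8s.io/api/core/v1"

// preferDualStackSpec requests IPv4 and IPv6 ClusterIPs when the cluster
// supports them and falls back to a single family otherwise. clusterIPs are
// left unset so the apiserver assigns them to match ipFamilies.
func preferDualStackSpec() corev1.ServiceSpec {
	policy := corev1.IPFamilyPolicyPreferDualStack
	return corev1.ServiceSpec{
		Type:           corev1.ServiceTypeClusterIP,
		IPFamilyPolicy: &policy,
		IPFamilies: []corev1.IPFamily{
			corev1.IPv4Protocol,
			corev1.IPv6Protocol,
		},
		Ports: []corev1.ServicePort{{Port: 80}},
	}
}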
- // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` } @@ -374,6 +371,7 @@ type VolumeNodeAffinity struct { } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. +// +enum type PersistentVolumeReclaimPolicy string const ( @@ -389,6 +387,7 @@ const ( ) // PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +// +enum type PersistentVolumeMode string const ( @@ -475,6 +474,9 @@ type PersistentVolumeClaimSpec struct { // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // Resources represents the minimum resources the volume should have. + // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + // that are lower than previous value but must still be higher than capacity recorded in the + // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` @@ -520,6 +522,7 @@ type PersistentVolumeClaimSpec struct { } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +// +enum type PersistentVolumeClaimConditionType string const ( @@ -529,6 +532,26 @@ const ( PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" ) +// +enum +type PersistentVolumeClaimResizeStatus string + +const ( + // When expansion is complete, the empty string is set by resize controller or kubelet. + PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = "" + // State set when resize controller starts expanding the volume in control-plane + PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress" + // State set when expansion has failed in resize controller with a terminal error. + // Transient errors such as timeout should not set this status and should leave ResizeStatus + // unmodified, so as resize controller can resume the volume expansion. + PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed" + // State set when resize controller has finished expanding the volume but further expansion is needed on the node. + PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending" + // State set when kubelet starts expanding the volume. + PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress" + // State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed. 
+ PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" +) + // PersistentVolumeClaimCondition contails details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` @@ -567,8 +590,27 @@ type PersistentVolumeClaimStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may + // be larger than the actual capacity when a volume expansion operation is requested. + // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. + // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. + // If a volume expansion capacity request is lowered, allocatedResources is only + // lowered if there are no expansion operations in progress and if the actual volume capacity + // is equal or lower than the requested capacity. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // ResizeStatus stores status of resize operation. + // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty + // string by resize controller or kubelet. + // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. + // +featureGate=RecoverVolumeExpansionFailure + // +optional + ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` } +// +enum type PersistentVolumeAccessMode string const ( @@ -583,6 +625,7 @@ const ( ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod" ) +// +enum type PersistentVolumePhase string const ( @@ -601,6 +644,7 @@ const ( VolumeFailed PersistentVolumePhase = "Failed" ) +// +enum type PersistentVolumeClaimPhase string const ( @@ -614,6 +658,7 @@ const ( ClaimLost PersistentVolumeClaimPhase = "Lost" ) +// +enum type HostPathType string const ( @@ -942,6 +987,7 @@ const ( ) // Protocol defines network protocols supported for things like container ports. +// +enum type Protocol string const ( @@ -1370,7 +1416,10 @@ type PhotonPersistentDiskVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } +// +enum type AzureDataDiskCachingMode string + +// +enum type AzureDataDiskKind string const ( @@ -1697,7 +1746,7 @@ type LocalVolumeSource struct { // Filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. // +optional FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } @@ -1895,6 +1944,7 @@ type VolumeMount struct { } // MountPropagationMode describes mount propagation. 
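The resize states above pair with the new AllocatedResources and ResizeStatus status fields. A minimal sketch of a consumer, assuming the RecoverVolumeExpansionFailure feature gate is enabled (an unset or empty ResizeStatus both mean no expansion is in progress):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportResize prints the allocated storage and the current resize state of
// a claim. AllocatedResources may exceed status.capacity while an expansion
// is pending; ResizeStatus is a pointer and stays nil on clusters without
// the feature gate.
func reportResize(pvc *corev1.PersistentVolumeClaim) {
	if allocated, ok := pvc.Status.AllocatedResources[corev1.ResourceStorage]; ok {
		fmt.Printf("allocated storage: %s\n", allocated.String())
	}
	rs := pvc.Status.ResizeStatus
	if rs == nil || *rs == corev1.PersistentVolumeClaimNoExpansionInProgress {
		fmt.Println("no expansion in progress")
		return
	}
	if *rs == corev1.PersistentVolumeClaimNodeExpansionFailed {
		fmt.Println("node expansion failed; manual recovery may be required")
		return
	}
	fmt.Printf("expansion state: %s\n", *rs)
}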
+// +enum type MountPropagationMode string const ( @@ -2085,6 +2135,7 @@ type HTTPGetAction struct { } // URIScheme identifies the scheme used for connection to a host for Get actions +// +enum type URIScheme string const ( @@ -2105,6 +2156,19 @@ type TCPSocketAction struct { Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } +type GRPCAction struct { + // Port number of the gRPC service. Number must be in the range 1 to 65535. + Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"` + + // Service is the name of the service to place in the gRPC HealthCheckRequest + // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + // + // If this is not specified, the default behavior is defined by gRPC. + // +optional + // +default="" + Service *string `json:"service" protobuf:"bytes,2,opt,name=service"` +} + // ExecAction describes a "run in container" action. type ExecAction struct { // Command is the command line to execute inside the container, the working directory for the @@ -2120,7 +2184,7 @@ type ExecAction struct { // alive or ready to receive traffic. type Probe struct { // The action taken to determine the health of a container - Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + ProbeHandler `json:",inline" protobuf:"bytes,1,opt,name=handler"` // Number of seconds after the container has started before liveness probes are initiated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional @@ -2157,6 +2221,7 @@ type Probe struct { } // PullPolicy describes a policy for if/when to pull a container image +// +enum type PullPolicy string const ( @@ -2169,6 +2234,7 @@ const ( ) // PreemptionPolicy describes a policy for if/when to preempt a pod. +// +enum type PreemptionPolicy string const ( @@ -2179,6 +2245,7 @@ const ( ) // TerminationMessagePolicy describes how termination messages are retrieved from a container. +// +enum type TerminationMessagePolicy string const ( @@ -2384,10 +2451,9 @@ type Container struct { TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` } -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. +// ProbeHandler defines a specific action that should be taken in a probe. +// One and only one of the fields must be specified. +type ProbeHandler struct { // Exec specifies the action to take. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` @@ -2395,8 +2461,28 @@ type Handler struct { // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + + // GRPC specifies an action involving a GRPC port. + // This is an alpha field and requires enabling GRPCContainerProbe feature gate. + // +featureGate=GRPCContainerProbe + // +optional + GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` +} + +// LifecycleHandler defines a specific action that should be taken in a lifecycle +// hook. One and only one of the fields, except TCPSocket must be specified. +type LifecycleHandler struct { + // Exec specifies the action to take. 
+ // +optional + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` + // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + // for the backward compatibility. There are no validation of this field and + // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` } @@ -2410,19 +2496,18 @@ type Lifecycle struct { // Other management of the container blocks until the hook completes. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` + PostStart *LifecycleHandler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated due to an // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container crashes or exits. The Pod's termination grace period countdown begins before the + // PreStop hook is executed. Regardless of the outcome of the handler, the // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes + // period (unless delayed by finalizers). Other management of the container blocks until the hook completes // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional - PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` + PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` } type ConditionStatus string @@ -2506,14 +2591,10 @@ type ContainerStatus struct { LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` // Specifies whether the container has passed its readiness probe. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. + // The number of times the container has been restarted. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` // The image the container is running. - // More info: https://kubernetes.io/docs/concepts/containers/images - // TODO(dchen1107): Which image the container is running with? + // More info: https://kubernetes.io/docs/concepts/containers/images. Image string `json:"image" protobuf:"bytes,6,opt,name=image"` // ImageID of the container's image. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` @@ -2529,6 +2610,7 @@ type ContainerStatus struct { } // PodPhase is a label for the condition of a pod at the current time. 
+// +enum type PodPhase string // These are the valid statuses of pods. @@ -2553,6 +2635,7 @@ const ( ) // PodConditionType is a valid value for PodCondition.Type +// +enum type PodConditionType string // These are valid conditions of pod. @@ -2602,6 +2685,7 @@ type PodCondition struct { // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one // is RestartPolicyAlways. +// +enum type RestartPolicy string const ( @@ -2611,6 +2695,7 @@ const ( ) // DNSPolicy defines how a pod's DNS will be configured. +// +enum type DNSPolicy string const ( @@ -2681,6 +2766,7 @@ type NodeSelectorRequirement struct { // A node selector operator is the set of operators that can be used in // a node selector requirement. +// +enum type NodeSelectorOperator string const ( @@ -2898,6 +2984,7 @@ type Taint struct { TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } +// +enum type TaintEffect string const ( @@ -2951,6 +3038,7 @@ type Toleration struct { } // A toleration operator is the set of operators that can be used in a toleration. +// +enum type TolerationOperator string const ( @@ -2999,7 +3087,7 @@ type PodSpec struct { // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge @@ -3190,8 +3278,57 @@ type PodSpec struct { // Default to false. // +optional SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"` + // Specifies the OS of the containers in the pod. + // Some pod and container fields are restricted if this is set. + // + // If the OS field is set to linux, the following fields must be unset: + // -securityContext.windowsOptions + // + // If the OS field is set to windows, following fields must be unset: + // - spec.hostPID + // - spec.hostIPC + // - spec.securityContext.seLinuxOptions + // - spec.securityContext.seccompProfile + // - spec.securityContext.fsGroup + // - spec.securityContext.fsGroupChangePolicy + // - spec.securityContext.sysctls + // - spec.shareProcessNamespace + // - spec.securityContext.runAsUser + // - spec.securityContext.runAsGroup + // - spec.securityContext.supplementalGroups + // - spec.containers[*].securityContext.seLinuxOptions + // - spec.containers[*].securityContext.seccompProfile + // - spec.containers[*].securityContext.capabilities + // - spec.containers[*].securityContext.readOnlyRootFilesystem + // - spec.containers[*].securityContext.privileged + // - spec.containers[*].securityContext.allowPrivilegeEscalation + // - spec.containers[*].securityContext.procMount + // - spec.containers[*].securityContext.runAsUser + // - spec.containers[*].securityContext.runAsGroup + // +optional + // This is an alpha field and requires the IdentifyPodOS feature + OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"` } +// OSName is the set of OS'es that can be used in OS. 
+type OSName string + +// These are valid values for OSName +const ( + Linux OSName = "linux" + Windows OSName = "windows" +) + +// PodOS defines the OS parameters of a pod. +type PodOS struct { + // Name is the name of the operating system. The currently supported values are linux and windows. + // Additional value may be defined in future and can be one of: + // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + // Clients should expect to handle additional values and treat unrecognized values in this field as os: null + Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// +enum type UnsatisfiableConstraintAction string const ( @@ -3236,7 +3373,7 @@ type TopologySpreadConstraint struct { // but giving higher precedence to topologies that would help reduce the // skew. // A constraint is considered "Unsatisfiable" for an incoming pod - // if and only if every possible node assigment for that pod would violate + // if and only if every possible node assignment for that pod would violate // "MaxSkew" on some topology. // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same // labelSelector spread as 3/1/1: @@ -3274,6 +3411,7 @@ type HostAlias struct { // PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume // when volume is mounted. +// +enum type PodFSGroupChangePolicy string const ( @@ -3297,11 +3435,13 @@ type PodSecurityContext struct { // container. May also be set in SecurityContext. If set in // both SecurityContext and PodSecurityContext, the value specified in SecurityContext // takes precedence for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` // The Windows specific settings applied to all containers. // If unspecified, the options within a container's SecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. @@ -3309,6 +3449,7 @@ type PodSecurityContext struct { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` // The GID to run the entrypoint of the container process. @@ -3316,6 +3457,7 @@ type PodSecurityContext struct { // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. @@ -3329,6 +3471,7 @@ type PodSecurityContext struct { // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. 
+ // Note that this field cannot be set when spec.os.name is windows. // +optional SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` // A special supplemental group that applies to all containers in a pod. @@ -3340,10 +3483,12 @@ type PodSecurityContext struct { // 3. The permission bits are OR'd with rw-rw---- // // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // Note that this field cannot be set when spec.os.name is windows. // +optional FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported // sysctls (by the container runtime) might fail to launch. + // Note that this field cannot be set when spec.os.name is windows. // +optional Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume @@ -3352,9 +3497,11 @@ type PodSecurityContext struct { // It will have no effect on ephemeral volume types such as: secret, configmaps // and emptydir. // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + // Note that this field cannot be set when spec.os.name is windows. // +optional FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` // The seccomp options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"` } @@ -3380,6 +3527,7 @@ type SeccompProfile struct { } // SeccompProfileType defines the supported seccomp profile types. +// +enum type SeccompProfileType string const ( @@ -3388,12 +3536,12 @@ const ( // SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile. SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault" // SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used. - // The file's location is based off the kubelet's deprecated flag --seccomp-profile-root. - // Once the flag support is removed the location will be /seccomp. + // The file's location relative to /seccomp. SeccompProfileTypeLocalhost SeccompProfileType = "Localhost" ) // PodQOSClass defines the supported qos classes of Pods. +// +enum type PodQOSClass string const ( @@ -3480,7 +3628,13 @@ type EphemeralContainerCommon struct { // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` // Ports are not allowed for ephemeral containers. - Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys // will be reported as an event when the container is starting. When a key exists in multiple @@ -3499,7 +3653,7 @@ type EphemeralContainerCommon struct { // already allocated to the pod. 
// +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesystem. + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath @@ -3579,15 +3733,16 @@ type EphemeralContainerCommon struct { // these two types. var _ = Container(EphemeralContainerCommon{}) -// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// An EphemeralContainer is a temporary container that you may add to an existing Pod for // user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. +// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +// Pod to exceed its resource allocation. +// +// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +// Pod. Ephemeral containers may not be removed or restarted. +// +// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. type EphemeralContainer struct { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -3597,8 +3752,10 @@ type EphemeralContainer struct { // If set, the name of the container from PodSpec that this ephemeral container targets. // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. + // If not set then the ephemeral container uses the namespaces configured in the Pod spec. + // + // The container runtime must implement support for this feature. If the runtime does not + // support namespace targeting then the result of setting this field is undefined. // +optional TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` } @@ -3688,7 +3845,7 @@ type PodStatus struct { // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. 
// +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } @@ -3936,6 +4093,7 @@ type ReplicationControllerList struct { } // Session Affinity Type string +// +enum type ServiceAffinity string const ( @@ -3965,6 +4123,7 @@ type ClientIPConfig struct { } // Service Type string describes ingress methods for a service +// +enum type ServiceType string const ( @@ -3989,6 +4148,7 @@ const ( // ServiceInternalTrafficPolicyType describes the type of traffic routing for // internal traffic +// +enum type ServiceInternalTrafficPolicyType string const ( @@ -4001,6 +4161,7 @@ const ( ) // Service External Traffic Policy Type string +// +enum type ServiceExternalTrafficPolicyType string const ( @@ -4062,6 +4223,7 @@ type LoadBalancerIngress struct { // IPFamily represents the IP Family (IPv4 or IPv6). This type is used // to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). +// +enum type IPFamily string const ( @@ -4072,6 +4234,7 @@ const ( ) // IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +// +enum type IPFamilyPolicyType string const ( @@ -4152,12 +4315,9 @@ type ServiceSpec struct { // clients must ensure that clusterIPs[0] and clusterIP have the same // value. // - // Unless the "IPv6DualStack" feature gate is enabled, this field is - // limited to one value, which must be the same as the clusterIP field. If - // the feature gate is enabled, this field may hold a maximum of two - // entries (dual-stack IPs, in either order). These IPs must correspond to - // the values of the ipFamilies field. Both clusterIPs and ipFamilies are - // governed by the ipFamilyPolicy field. + // This field may hold a maximum of two entries (dual-stack IPs, in either order). + // These IPs must correspond to the values of the ipFamilies field. Both + // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +listType=atomic // +optional @@ -4263,17 +4423,16 @@ type ServiceSpec struct { // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - // service, and is gated by the "IPv6DualStack" feature gate. This field - // is usually assigned automatically based on cluster configuration and the - // ipFamilyPolicy field. If this field is specified manually, the requested - // family is available in the cluster, and ipFamilyPolicy allows it, it - // will be used; otherwise creation of the service will fail. This field - // is conditionally mutable: it allows for adding or removing a secondary - // IP family, but it does not allow changing the primary IP family of the - // Service. Valid values are "IPv4" and "IPv6". This field only applies - // to Services of types ClusterIP, NodePort, and LoadBalancer, and does - // apply to "headless" services. This field will be wiped when updating a - // Service to type ExternalName. + // service. This field is usually assigned automatically based on cluster + // configuration and the ipFamilyPolicy field. If this field is specified + // manually, the requested family is available in the cluster, + // and ipFamilyPolicy allows it, it will be used; otherwise creation of + // the service will fail. 
This field is conditionally mutable: it allows + // for adding or removing a secondary IP family, but it does not allow + // changing the primary IP family of the Service. Valid values are "IPv4" + // and "IPv6". This field only applies to Services of types ClusterIP, + // NodePort, and LoadBalancer, and does apply to "headless" services. + // This field will be wiped when updating a Service to type ExternalName. // // This field may hold a maximum of two entries (dual-stack families, in // either order). These families must correspond to the values of the @@ -4284,14 +4443,13 @@ type ServiceSpec struct { IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"` // IPFamilyPolicy represents the dual-stack-ness requested or required by - // this Service, and is gated by the "IPv6DualStack" feature gate. If - // there is no value provided, then this field will be set to SingleStack. - // Services can be "SingleStack" (a single IP family), "PreferDualStack" - // (two IP families on dual-stack configured clusters or a single IP family - // on single-stack clusters), or "RequireDualStack" (two IP families on - // dual-stack configured clusters, otherwise fail). The ipFamilies and - // clusterIPs fields depend on the value of this field. This field will be - // wiped when updating a service to type ExternalName. + // this Service. If there is no value provided, then this field will be set + // to SingleStack. Services can be "SingleStack" (a single IP family), + // "PreferDualStack" (two IP families on dual-stack configured clusters or + // a single IP family on single-stack clusters), or "RequireDualStack" + // (two IP families on dual-stack configured clusters, otherwise fail). The + // ipFamilies and clusterIPs fields depend on the value of this field. This + // field will be wiped when updating a service to type ExternalName. // +optional IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` @@ -4884,6 +5042,7 @@ type ContainerImage struct { SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` } +// +enum type NodePhase string // These are the valid phases of node. @@ -4896,6 +5055,7 @@ const ( NodeTerminated NodePhase = "Terminated" ) +// +enum type NodeConditionType string // These are valid conditions of node. Currently, we don't have enough information to decide @@ -4934,6 +5094,7 @@ type NodeCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +// +enum type NodeAddressType string // These are valid address type of node. @@ -5089,6 +5250,7 @@ type NamespaceStatus struct { Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } +// +enum type NamespacePhase string // These are the valid phases of a namespace. @@ -5105,6 +5267,7 @@ const ( NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating" ) +// +enum type NamespaceConditionType string // These are valid conditions of a namespace. @@ -5304,12 +5467,10 @@ type PodExecOptions struct { Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` // Redirect the standard output stream of the pod for this call. - // Defaults to true. // +optional Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` // Redirect the standard error stream of the pod for this call. - // Defaults to true. 
// +optional Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` @@ -5598,6 +5759,7 @@ type EventList struct { type List metav1.List // LimitType is a type of object that is limited +// +enum type LimitType string const ( @@ -5714,6 +5876,7 @@ const ( ) // A ResourceQuotaScope defines a filter that must match each object tracked by a quota +// +enum type ResourceQuotaScope string const ( @@ -5776,6 +5939,7 @@ type ScopedResourceSelectorRequirement struct { // A scope selector operator is the set of operators that can be used in // a scope selector requirement. +// +enum type ScopeSelectorOperator string const ( @@ -5868,6 +6032,7 @@ type Secret struct { StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` // Used to facilitate programmatic handling of secret data. + // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types // +optional Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` } @@ -6144,34 +6309,40 @@ type DownwardAPIProjection struct { type SecurityContext struct { // The capabilities to add/drop when running containers. // Defaults to the default set of capabilities granted by the container runtime. + // Note that this field cannot be set when spec.os.name is windows. // +optional Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. + // Note that this field cannot be set when spec.os.name is windows. // +optional Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` // The SELinux context to be applied to the container. // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` // The Windows specific settings applied to all containers. // If unspecified, the options from the PodSecurityContext will be used. // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is linux. // +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` // The GID to run the entrypoint of the container process. // Uses runtime default if unset. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. + // Note that this field cannot be set when spec.os.name is windows. 
// +optional RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. @@ -6184,6 +6355,7 @@ type SecurityContext struct { RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` // Whether this container has a read-only root filesystem. // Default is false. + // Note that this field cannot be set when spec.os.name is windows. // +optional ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` // AllowPrivilegeEscalation controls whether a process can gain more @@ -6192,21 +6364,25 @@ type SecurityContext struct { // AllowPrivilegeEscalation is true always when the container is: // 1) run as Privileged // 2) has CAP_SYS_ADMIN + // Note that this field cannot be set when spec.os.name is windows. // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. // The default is DefaultProcMount which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. + // Note that this field cannot be set when spec.os.name is windows. // +optional ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` // The seccomp options to use by this container. If seccomp options are // provided at both the pod & container level, the container options // override the pod options. + // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"` } +// +enum type ProcMountType string const ( diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 8d6327375..0a60e7008 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -428,8 +428,8 @@ var map_ContainerStatus = map[string]string{ "state": "Details about the container's current condition.", "lastState": "Details about the container's last termination condition.", "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", + "restartCount": "The number of times the container has been restarted.", + "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. 
Is always true when no startupProbe is defined.", @@ -579,8 +579,8 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_EphemeralContainer = map[string]string{ - "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", - "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", + "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.\n\nThis is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", } func (EphemeralContainer) SwaggerDoc() map[string]string { @@ -598,7 +598,7 @@ var map_EphemeralContainerCommon = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", - "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. 
Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", "readinessProbe": "Probes are not allowed for ephemeral containers.", @@ -749,6 +749,15 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { return map_GCEPersistentDiskVolumeSource } +var map_GRPCAction = map[string]string{ + "port": "Port number of the gRPC service. Number must be in the range 1 to 65535.", + "service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", +} + +func (GRPCAction) SwaggerDoc() map[string]string { + return map_GRPCAction +} + var map_GitRepoVolumeSource = map[string]string{ "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "repository": "Repository URL", @@ -806,17 +815,6 @@ func (HTTPHeader) SwaggerDoc() map[string]string { return map_HTTPHeader } -var map_Handler = map[string]string{ - "": "Handler defines a specific action that should be taken", - "exec": "One and only one of the following should be specified. Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", -} - -func (Handler) SwaggerDoc() map[string]string { - return map_Handler -} - var map_HostAlias = map[string]string{ "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", "ip": "IP address of the host file entry.", @@ -889,13 +887,24 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { return map_Lifecycle } +var map_LifecycleHandler = map[string]string{ + "": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.", + "exec": "Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", +} + +func (LifecycleHandler) SwaggerDoc() map[string]string { + return map_LifecycleHandler +} + var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -971,7 +980,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { var map_LocalVolumeSource = map[string]string{ "": "Local represents directly-attached storage with node affinity (Beta feature)", "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", - "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.", + "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", } func (LocalVolumeSource) SwaggerDoc() map[string]string { @@ -1297,7 +1306,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "resources": "Resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", @@ -1310,11 +1319,13 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "Represents the actual resources of the underlying volume.", - "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", + "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "resizeStatus": "ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1511,8 +1522,8 @@ func (PodDNSConfigOption) SwaggerDoc() map[string]string { var map_PodExecOptions = map[string]string{ "": "PodExecOptions is the query options to a Pod's remote exec call.", "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", - "stderr": "Redirect the standard error stream of the pod for this call. 
Defaults to true.", + "stdout": "Redirect the standard output stream of the pod for this call.", + "stderr": "Redirect the standard error stream of the pod for this call.", "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", "command": "Command is the remote command to execute. argv array. Not executed within a shell.", @@ -1558,6 +1569,15 @@ func (PodLogOptions) SwaggerDoc() map[string]string { return map_PodLogOptions } +var map_PodOS = map[string]string{ + "": "PodOS defines the OS parameters of a pod.", + "name": "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null", +} + +func (PodOS) SwaggerDoc() map[string]string { + return map_PodOS +} + var map_PodPortForwardOptions = map[string]string{ "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", "ports": "List of ports to forward Required when using WebSockets", @@ -1587,16 +1607,16 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.", - "seccompProfile": "The seccomp options to use by the containers in this pod.", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1617,7 +1637,7 @@ var map_PodSpec = map[string]string{ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. 
Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1649,6 +1669,7 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1669,7 +1690,7 @@ var map_PodStatus = map[string]string{ "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. 
Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1782,6 +1803,18 @@ func (Probe) SwaggerDoc() map[string]string { return map_Probe } +var map_ProbeHandler = map[string]string{ + "": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.", + "exec": "Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port.", + "grpc": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.", +} + +func (ProbeHandler) SwaggerDoc() map[string]string { + return map_ProbeHandler +} + var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", "sources": "list of volume projections", @@ -2056,7 +2089,7 @@ var map_Secret = map[string]string{ "immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.", - "type": "Used to facilitate programmatic handling of secret data.", + "type": "Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types", } func (Secret) SwaggerDoc() map[string]string { @@ -2126,17 +2159,17 @@ func (SecretVolumeSource) SwaggerDoc() map[string]string { var map_SecurityContext = map[string]string{ "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", - "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. 
Defaults to false.", - "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. 
Default is false.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.", - "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.", } func (SecurityContext) SwaggerDoc() map[string]string { @@ -2234,7 +2267,7 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. 
If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", @@ -2245,8 +2278,8 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. 
If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", - "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. 
This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \"Cluster\" routes internal traffic to a Service to all endpoints. \"Local\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \"Cluster\".", @@ -2369,7 +2402,7 @@ var map_TopologySpreadConstraint = map[string]string{ "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", - "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", } @@ -2472,7 +2505,7 @@ var map_VolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", "csi": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", - "ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.\n\nThis is a beta feature and only available when the GenericEphemeralVolume feature gate is enabled.", + "ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index ff660ae27..fc951ad44 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -1668,6 +1669,27 @@ func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSour return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GRPCAction) DeepCopyInto(out *GRPCAction) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction. +func (in *GRPCAction) DeepCopy() *GRPCAction { + if in == nil { + return nil + } + out := new(GRPCAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { *out = *in @@ -1759,37 +1781,6 @@ func (in *HTTPHeader) DeepCopy() *HTTPHeader { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Handler) DeepCopyInto(out *Handler) { - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. -func (in *Handler) DeepCopy() *Handler { - if in == nil { - return nil - } - out := new(Handler) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostAlias) DeepCopyInto(out *HostAlias) { *out = *in @@ -1920,12 +1911,12 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { *out = *in if in.PostStart != nil { in, out := &in.PostStart, &out.PostStart - *out = new(Handler) + *out = new(LifecycleHandler) (*in).DeepCopyInto(*out) } if in.PreStop != nil { in, out := &in.PreStop, &out.PreStop - *out = new(Handler) + *out = new(LifecycleHandler) (*in).DeepCopyInto(*out) } return @@ -1941,6 +1932,37 @@ func (in *Lifecycle) DeepCopy() *Lifecycle { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler. +func (in *LifecycleHandler) DeepCopy() *LifecycleHandler { + if in == nil { + return nil + } + out := new(LifecycleHandler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LimitRange) DeepCopyInto(out *LimitRange) { *out = *in @@ -2974,6 +2996,18 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.ResizeStatus != nil { + in, out := &in.ResizeStatus, &out.ResizeStatus + *out = new(PersistentVolumeClaimResizeStatus) + **out = **in + } return } @@ -3600,6 +3634,22 @@ func (in *PodLogOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodOS) DeepCopyInto(out *PodOS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS. +func (in *PodOS) DeepCopy() *PodOS { + if in == nil { + return nil + } + out := new(PodOS) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { *out = *in @@ -3894,6 +3944,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(bool) **out = **in } + if in.OS != nil { + in, out := &in.OS, &out.OS + *out = new(PodOS) + **out = **in + } return } @@ -4161,7 +4216,7 @@ func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Probe) DeepCopyInto(out *Probe) { *out = *in - in.Handler.DeepCopyInto(&out.Handler) + in.ProbeHandler.DeepCopyInto(&out.ProbeHandler) if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -4180,6 +4235,42 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPCAction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler. +func (in *ProbeHandler) DeepCopy() *ProbeHandler { + if in == nil { + return nil + } + out := new(ProbeHandler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { *out = *in diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index b66fdd884..3c45ba3e7 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -55,6 +55,7 @@ type EndpointSlice struct { } // AddressType represents the type of address referred to by an endpoint. 
+// +enum type AddressType string const ( diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go index 31a912386..caa872af0 100644 --- a/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index f13536b4b..13b9544b0 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go index c0f2c63f0..ebca42477 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go index 19a3c5f77..738f0163d 100644 --- a/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index 779ebaf6e..1e073ae9c 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go index 2ab7b412b..227ca4b7d 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 838315610..e0961588e 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 5023dd31a..963aaffba 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 2e0d0eae2..6c0cf9342 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -352,8 +352,10 @@ message 
QueuingConfiguration { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. message ResourcePolicyRule { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index afcc6fa34..99b609494 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -241,8 +241,10 @@ type ServiceAccountSubject struct { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. type ResourcePolicyRule struct { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index 8171360bb..1827be02d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -215,7 +215,7 @@ func (QueuingConfiguration) SwaggerDoc() map[string]string { } var map_ResourcePolicyRule = map[string]string{ - "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. 
\"*\" matches all API groups and, if present, must be the only entry. Required.", "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go index f5d37954b..7f73f4606 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go index 4152aa2a9..a248aec45 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 40f441c5c..309d397ff 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -352,8 +352,10 @@ message QueuingConfiguration { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. message ResourcePolicyRule { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index c38021d69..767a32e7f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -279,8 +279,10 @@ type ServiceAccountSubject struct { // ResourcePolicyRule matches a resource request if and only if: (a) // at least one member of verbs matches the request, (b) at least one // member of apiGroups matches the request, (c) at least one member of -// resources matches the request, and (d) least one member of -// namespaces matches the request. +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. type ResourcePolicyRule struct { // `verbs` is a list of matching verbs and may not be empty. // "*" matches all verbs and, if present, must be the only entry. 
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 0f2f05e3a..b3752b6fb 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -215,7 +215,7 @@ func (QueuingConfiguration) SwaggerDoc() map[string]string { } var map_ResourcePolicyRule = map[string]string{ - "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go index c8f6e2306..b7b84634a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go index d0f229718..f02fcdccd 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go similarity index 58% rename from vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go rename to vendor/k8s.io/api/flowcontrol/v1beta2/doc.go index 45b39eb0f..53b460d37 100644 --- a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag_114.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go @@ -1,7 +1,5 @@ -// +build go1.14 - /* -Copyright 2020 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,14 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package flag +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true -import ( - "crypto/tls" -) +// +groupName=flowcontrol.apiserver.k8s.io -func init() { - // support official IANA names as well on go1.14 - ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 -} +// Package v1beta2 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1beta2 // import "k8s.io/api/flowcontrol/v1beta2" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go new file mode 100644 index 000000000..fe550271b --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go @@ -0,0 +1,5367 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto + +package v1beta2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList 
proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func 
(*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() 
+} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func 
(*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m 
*ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_ed300aa8e672704e, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta2.LimitResponse") + 
proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta2.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta2.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta2.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta2.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_ed300aa8e672704e) +} + +var fileDescriptor_ed300aa8e672704e = []byte{ + // 1495 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, + 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, + 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, + 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, + 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, + 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, + 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, + 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, + 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, + 0x84, 0x19, 0xe7, 0xcb, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, + 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, + 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, + 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0x75, 0xae, 0x37, 0x89, 0xd5, 0xf2, + 0x30, 0x2f, 0xcf, 0xb7, 0x99, 0xd5, 0x24, 0x7d, 0x0e, 0x6f, 0xa5, 0x39, 0x50, 0xb3, 0x4e, 0x9a, + 0x46, 0xaf, 0x9f, 0x7a, 0x07, 0x56, 0x3e, 0x6e, 0x38, 0x07, 0x57, 0x2c, 0xca, 0x2c, 0xbb, 0xe6, + 0x5b, 0xb4, 0x4e, 0xbc, 0x2d, 0xc2, 0xea, 0x4e, 0x15, 0x7d, 0x00, 0x59, 0x76, 0xe8, 0x92, 0x82, + 0xb2, 0xa6, 0xbc, 0x96, 0xd7, 
0x4f, 0x3d, 0x6a, 0x97, 0x26, 0x3a, 0xed, 0x52, 0xf6, 0xd6, 0xa1, + 0x4b, 0x9e, 0xb5, 0x4b, 0xc7, 0x87, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea, 0xf7, 0x19, 0x00, 0x6e, + 0xb5, 0x2b, 0x42, 0xa3, 0x7b, 0x30, 0xc3, 0xcb, 0xad, 0x1a, 0xcc, 0x10, 0x98, 0xb3, 0xe7, 0xcf, + 0x6a, 0x71, 0xaf, 0xa3, 0xac, 0x35, 0x77, 0xbf, 0xc6, 0x05, 0x54, 0xe3, 0xd6, 0x5a, 0xeb, 0x9c, + 0x76, 0xa3, 0x72, 0x9f, 0x98, 0x6c, 0x8b, 0x30, 0x43, 0x47, 0x32, 0x0b, 0x88, 0x65, 0x38, 0x42, + 0x45, 0x3b, 0x90, 0xa5, 0x2e, 0x31, 0x0b, 0x19, 0x81, 0xae, 0x69, 0xa3, 0x4f, 0x52, 0x8b, 0x73, + 0xdb, 0x75, 0x89, 0xa9, 0xcf, 0x85, 0x15, 0xf2, 0x2f, 0x2c, 0x90, 0xd0, 0x17, 0x30, 0x45, 0x99, + 0xc1, 0x7c, 0x5a, 0x98, 0xec, 0xcb, 0x38, 0x0d, 0x53, 0xf8, 0xe9, 0x0b, 0x12, 0x75, 0x2a, 0xf8, + 0xc6, 0x12, 0x4f, 0x7d, 0x92, 0x81, 0xe5, 0xd8, 0x78, 0xc3, 0xb1, 0xab, 0x16, 0xb3, 0x1c, 0x1b, + 0xbd, 0x9b, 0xe8, 0xfa, 0xc9, 0x9e, 0xae, 0xaf, 0x0c, 0x70, 0x89, 0x3b, 0x8e, 0xde, 0x8e, 0xd2, + 0xcd, 0x08, 0xf7, 0x13, 0xc9, 0xe0, 0xcf, 0xda, 0xa5, 0xc5, 0xc8, 0x2d, 0x99, 0x0f, 0x6a, 0x01, + 0x6a, 0x18, 0x94, 0xdd, 0xf2, 0x0c, 0x9b, 0x06, 0xb0, 0x56, 0x93, 0xc8, 0xaa, 0xdf, 0x18, 0xef, + 0x9c, 0xb8, 0x87, 0xbe, 0x2a, 0x43, 0xa2, 0xcd, 0x3e, 0x34, 0x3c, 0x20, 0x02, 0x7a, 0x15, 0xa6, + 0x3c, 0x62, 0x50, 0xc7, 0x2e, 0x64, 0x45, 0xca, 0x51, 0xbf, 0xb0, 0x90, 0x62, 0xa9, 0x45, 0xaf, + 0xc3, 0x74, 0x93, 0x50, 0x6a, 0xd4, 0x48, 0x21, 0x27, 0x0c, 0x17, 0xa5, 0xe1, 0xf4, 0x56, 0x20, + 0xc6, 0xa1, 0x5e, 0xfd, 0x45, 0x81, 0x85, 0xb8, 0x4f, 0x9b, 0x16, 0x65, 0xe8, 0x6e, 0x1f, 0xf7, + 0xb4, 0xf1, 0x6a, 0xe2, 0xde, 0x82, 0x79, 0x4b, 0x32, 0xdc, 0x4c, 0x28, 0xe9, 0xe2, 0xdd, 0x0d, + 0xc8, 0x59, 0x8c, 0x34, 0x79, 0xd7, 0x27, 0x7b, 0xda, 0x95, 0x42, 0x12, 0x7d, 0x5e, 0xc2, 0xe6, + 0xae, 0x71, 0x00, 0x1c, 0xe0, 0xa8, 0x7f, 0x4d, 0x76, 0x57, 0xc0, 0xf9, 0x88, 0x7e, 0x52, 0x60, + 0xd5, 0xf5, 0x2c, 0xc7, 0xb3, 0xd8, 0xe1, 0x26, 0x69, 0x91, 0xc6, 0x86, 0x63, 0xef, 0x59, 0x35, + 0xdf, 0x33, 0x78, 0x2b, 0x65, 0x51, 0x1b, 0x69, 0x91, 0x77, 0x86, 0x22, 0x60, 0xb2, 0x47, 0x3c, + 0x62, 0x9b, 0x44, 0x57, 0x65, 0x4a, 0xab, 0x23, 0x8c, 0x47, 0xa4, 0x82, 0x3e, 0x05, 0xd4, 0x34, + 0x18, 0xef, 0x68, 0x6d, 0xc7, 0x23, 0x26, 0xa9, 0x72, 0x54, 0x41, 0xc8, 0x5c, 0xcc, 0x8e, 0xad, + 0x3e, 0x0b, 0x3c, 0xc0, 0x0b, 0x7d, 0xab, 0xc0, 0x72, 0xb5, 0x7f, 0xc8, 0x48, 0x5e, 0x5e, 0x1a, + 0xa7, 0xd1, 0x03, 0x66, 0x94, 0xbe, 0xd2, 0x69, 0x97, 0x96, 0x07, 0x28, 0xf0, 0xa0, 0x60, 0xe8, + 0x2e, 0xe4, 0x3c, 0xbf, 0x41, 0x68, 0x21, 0x2b, 0x8e, 0x37, 0x35, 0xea, 0x8e, 0xd3, 0xb0, 0xcc, + 0x43, 0xcc, 0x5d, 0x3e, 0xb7, 0x58, 0x7d, 0xd7, 0x17, 0xb3, 0x8a, 0xc6, 0x67, 0x2d, 0x54, 0x38, + 0x00, 0x55, 0x1f, 0xc2, 0x52, 0xef, 0xd0, 0x40, 0x35, 0x00, 0x33, 0xbc, 0xa7, 0xb4, 0xa0, 0x88, + 0xb0, 0x6f, 0x8e, 0xcf, 0xaa, 0xe8, 0x8e, 0xc7, 0xf3, 0x32, 0x12, 0x51, 0xdc, 0x05, 0xad, 0x9e, + 0x85, 0xb9, 0xab, 0x9e, 0xe3, 0xbb, 0x32, 0x47, 0xb4, 0x06, 0x59, 0xdb, 0x68, 0x86, 0xd3, 0x27, + 0x9a, 0x88, 0xdb, 0x46, 0x93, 0x60, 0xa1, 0x51, 0x7f, 0x54, 0x60, 0x7e, 0xd3, 0x6a, 0x5a, 0x0c, + 0x13, 0xea, 0x3a, 0x36, 0x25, 0xe8, 0x62, 0x62, 0x62, 0x9d, 0xe8, 0x99, 0x58, 0x47, 0x12, 0xc6, + 0x5d, 0xb3, 0xea, 0x4b, 0x98, 0x7e, 0xe0, 0x13, 0xdf, 0xb2, 0x6b, 0x72, 0x5e, 0x5f, 0x48, 0x2b, + 0xf0, 0x66, 0x60, 0x9e, 0x60, 0x9b, 0x3e, 0xcb, 0x47, 0x80, 0xd4, 0xe0, 0x10, 0x51, 0xfd, 0x5b, + 0x81, 0x13, 0x22, 0x30, 0xa9, 0x0e, 0x67, 0x31, 0xba, 0x0b, 0x05, 0x83, 0x52, 0xdf, 0x23, 0xd5, + 0x0d, 0xc7, 0x36, 0x7d, 0x8f, 0xf3, 0xff, 0x70, 0xb7, 0x6e, 0x78, 0x84, 0x8a, 0x6a, 0x72, 0xfa, + 0x9a, 0xac, 0xa6, 0xb0, 0x3e, 0xc4, 0x0e, 0x0f, 0x45, 
0x40, 0xf7, 0x61, 0xbe, 0xd1, 0x5d, 0xbb, + 0x2c, 0xf3, 0x4c, 0x5a, 0x99, 0x89, 0x86, 0xe9, 0xc7, 0x64, 0x06, 0xc9, 0xa6, 0xe3, 0x24, 0xb4, + 0x7a, 0x00, 0xc7, 0xb6, 0xf9, 0x1d, 0xa6, 0x8e, 0xef, 0x99, 0x24, 0x26, 0x20, 0x2a, 0x41, 0xae, + 0x45, 0xbc, 0x4a, 0x40, 0xa2, 0xbc, 0x9e, 0xe7, 0xf4, 0xfb, 0x8c, 0x0b, 0x70, 0x20, 0x47, 0xef, + 0xc1, 0xa2, 0x1d, 0x7b, 0xde, 0xc6, 0x9b, 0xb4, 0x30, 0x25, 0x4c, 0x97, 0x3b, 0xed, 0xd2, 0xe2, + 0x76, 0x52, 0x85, 0x7b, 0x6d, 0xd5, 0x76, 0x06, 0x56, 0x86, 0xf0, 0x1d, 0xdd, 0x86, 0x19, 0x2a, + 0x7f, 0x4b, 0x0e, 0x9f, 0x4c, 0xab, 0x5d, 0xfa, 0xc6, 0xd3, 0x36, 0x04, 0xc3, 0x11, 0x14, 0x72, + 0x60, 0xde, 0x93, 0x29, 0x88, 0x98, 0x72, 0xea, 0x9e, 0x4f, 0xc3, 0xee, 0xef, 0x4e, 0xdc, 0x5c, + 0xdc, 0x0d, 0x88, 0x93, 0xf8, 0xe8, 0x21, 0x2c, 0x75, 0x95, 0x1d, 0xc4, 0x9c, 0x14, 0x31, 0x2f, + 0xa6, 0xc5, 0x1c, 0x78, 0x28, 0x7a, 0x41, 0x86, 0x5d, 0xda, 0xee, 0x81, 0xc5, 0x7d, 0x81, 0xd4, + 0xdf, 0x32, 0x30, 0x62, 0x10, 0xbf, 0x84, 0xa5, 0xea, 0x5e, 0x62, 0xa9, 0x7a, 0xff, 0xf9, 0x5f, + 0x98, 0xa1, 0x4b, 0x56, 0xbd, 0x67, 0xc9, 0xfa, 0xf0, 0x05, 0x62, 0x8c, 0x5e, 0xba, 0xfe, 0xc9, + 0xc0, 0x2b, 0xc3, 0x9d, 0xe3, 0x25, 0xec, 0x7a, 0x62, 0xa4, 0x5d, 0xea, 0x19, 0x69, 0x27, 0xc7, + 0x80, 0xf8, 0x7f, 0x29, 0xeb, 0x59, 0xca, 0x7e, 0x57, 0xa0, 0x38, 0xbc, 0x6f, 0x2f, 0x61, 0x49, + 0xfb, 0x2a, 0xb9, 0xa4, 0xbd, 0xf3, 0xfc, 0x24, 0x1b, 0xb2, 0xb4, 0x5d, 0x1d, 0xc5, 0xad, 0x68, + 0xbd, 0x1a, 0xe3, 0x89, 0xfd, 0x75, 0x64, 0xab, 0xc4, 0x36, 0x98, 0xf2, 0x2f, 0x21, 0xe1, 0xfd, + 0x91, 0x6d, 0x54, 0x1a, 0xa4, 0x49, 0x6c, 0x26, 0x09, 0x59, 0x87, 0xe9, 0x46, 0xf0, 0x36, 0xca, + 0x4b, 0xbd, 0x3e, 0xd6, 0x93, 0x34, 0xea, 0x29, 0x0d, 0x9e, 0x61, 0x69, 0x86, 0x43, 0x78, 0xf5, + 0x07, 0x05, 0xd6, 0xd2, 0x2e, 0x2b, 0x3a, 0x18, 0xb0, 0xec, 0xbc, 0xc0, 0x22, 0x3b, 0xfe, 0xf2, + 0xf3, 0xb3, 0x02, 0x47, 0x07, 0xed, 0x14, 0x9c, 0xfe, 0x7c, 0x91, 0x88, 0xb6, 0x80, 0x88, 0xfe, + 0x37, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x0d, 0x33, 0x75, 0xc3, 0xae, 0xee, 0x5a, 0x5f, 0x87, 0xfb, + 0x6d, 0x44, 0xc0, 0x4f, 0xa4, 0x1c, 0x47, 0x16, 0xe8, 0x0a, 0x2c, 0x09, 0xbf, 0x4d, 0x62, 0xd7, + 0x58, 0x5d, 0xf4, 0x4a, 0x5c, 0xe5, 0x5c, 0xfc, 0x1e, 0xdc, 0xec, 0xd1, 0xe3, 0x3e, 0x0f, 0xf5, + 0x5f, 0x05, 0xd0, 0xf3, 0xbc, 0xf3, 0xa7, 0x20, 0x6f, 0xb8, 0x96, 0x58, 0xf6, 0x82, 0x2b, 0x90, + 0xd7, 0xe7, 0x3b, 0xed, 0x52, 0x7e, 0x7d, 0xe7, 0x5a, 0x20, 0xc4, 0xb1, 0x9e, 0x1b, 0x87, 0x4f, + 0x60, 0xf0, 0xd4, 0x49, 0xe3, 0x30, 0x30, 0xc5, 0xb1, 0x1e, 0x5d, 0x86, 0x39, 0xb3, 0xe1, 0x53, + 0x46, 0xbc, 0x5d, 0xd3, 0x71, 0x89, 0x18, 0x19, 0x33, 0xfa, 0x51, 0x59, 0xd3, 0xdc, 0x46, 0x97, + 0x0e, 0x27, 0x2c, 0x91, 0x06, 0xc0, 0x09, 0x4f, 0x5d, 0x83, 0xc7, 0xc9, 0x89, 0x38, 0x0b, 0xfc, + 0xc0, 0xb6, 0x23, 0x29, 0xee, 0xb2, 0x50, 0xef, 0xc3, 0xb1, 0x5d, 0xe2, 0xb5, 0x2c, 0x93, 0xac, + 0x9b, 0xa6, 0xe3, 0xdb, 0x2c, 0x5c, 0x5b, 0xcb, 0x90, 0x8f, 0xcc, 0xe4, 0x9d, 0x38, 0x22, 0xe3, + 0xe7, 0x23, 0x2c, 0x1c, 0xdb, 0x44, 0x97, 0x30, 0x33, 0xfc, 0x12, 0x66, 0x60, 0x3a, 0x86, 0xcf, + 0xee, 0x5b, 0x76, 0x55, 0x22, 0x1f, 0x0f, 0xad, 0xaf, 0x5b, 0x76, 0xf5, 0x59, 0xbb, 0x34, 0x2b, + 0xcd, 0xf8, 0x27, 0x16, 0x86, 0xe8, 0x1a, 0x64, 0x7d, 0x4a, 0x3c, 0x79, 0xbd, 0x4e, 0xa5, 0x91, + 0xf9, 0x36, 0x25, 0x5e, 0xb8, 0xf9, 0xcc, 0x70, 0x64, 0x2e, 0xc0, 0x02, 0x02, 0x6d, 0x41, 0xae, + 0xc6, 0x0f, 0x45, 0x4e, 0xfd, 0xd3, 0x69, 0x58, 0xdd, 0xeb, 0x7c, 0x40, 0x03, 0x21, 0xc1, 0x01, + 0x0a, 0x7a, 0x00, 0x0b, 0x34, 0xd1, 0x42, 0x71, 0x5c, 0x63, 0x6c, 0x32, 0x03, 0x1b, 0xaf, 0xa3, + 0x4e, 0xbb, 0xb4, 0x90, 0x54, 0xe1, 0x9e, 0x00, 0x6a, 0x19, 0x66, 0xbb, 0x0a, 
0x4c, 0x9f, 0x7f, + 0xfa, 0x99, 0x47, 0x4f, 0x8b, 0x13, 0x8f, 0x9f, 0x16, 0x27, 0x9e, 0x3c, 0x2d, 0x4e, 0x7c, 0xd3, + 0x29, 0x2a, 0x8f, 0x3a, 0x45, 0xe5, 0x71, 0xa7, 0xa8, 0x3c, 0xe9, 0x14, 0x95, 0x3f, 0x3a, 0x45, + 0xe5, 0xbb, 0x3f, 0x8b, 0x13, 0x77, 0xa6, 0x65, 0x66, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, + 0xb4, 0x84, 0x2d, 0xfa, 0x13, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } 
+ if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := 
m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + 
strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", 
this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
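Every Unmarshal method in this file follows the same wire-format loop: read a varint key, split it into a field number (key >> 3) and a wire type (key & 0x7), then decode the payload either as another varint (wire type 0) or as a length-prefixed byte run (wire type 2). As a rough sketch of just the varint step these loops repeat, assuming well-formed input and omitting the bounds and overflow checks the generated code performs:

package main

import "fmt"

// decodeVarint mirrors the shift/OR loops used throughout the generated
// Unmarshal methods: base-128, low 7 bits per byte, a set high bit means
// "more bytes follow". Bounds checking is omitted for brevity.
func decodeVarint(data []byte, i int) (v uint64, next int) {
	for shift := uint(0); ; shift += 7 {
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i
		}
	}
}

func main() {
	// 0x0A is the key for field 1 with wire type 2 (length-delimited),
	// followed by a 4-byte payload; the value is only illustrative.
	buf := []byte{0x0A, 0x04, 'u', 's', 'e', 'r'}
	key, next := decodeVarint(buf, 0)
	fmt.Println(key>>3, key&0x7) // 1 2
	length, next := decodeVarint(buf, next)
	fmt.Println(string(buf[next : next+int(length)])) // user
}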
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
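Optional sub-messages in these types are pointers (Queuing, DistinguisherMethod, Limited, and the three Subject variants), so their Unmarshal cases allocate the target only when the field actually appears on the wire; required sub-objects such as ObjectMeta or LimitResponse are value fields and are decoded in place. A toy illustration of that pointer pattern, with made-up names rather than the real generated types:

package main

import "fmt"

// queuingConfig and limitResponse are stand-ins for the generated types: the
// optional queuing configuration stays nil until its field shows up on the wire.
type queuingConfig struct{ Queues int32 }

type limitResponse struct {
	Type    string
	Queuing *queuingConfig // nil means "field 2 never seen"
}

// setQueuing mirrors the "if m.Queuing == nil { m.Queuing = &... }" step that
// precedes the nested Unmarshal call in the generated code.
func (m *limitResponse) setQueuing(q queuingConfig) {
	if m.Queuing == nil {
		m.Queuing = &queuingConfig{}
	}
	*m.Queuing = q
}

func main() {
	m := &limitResponse{Type: "Queue"}
	fmt.Println(m.Queuing == nil) // true: nothing decoded yet
	m.setQueuing(queuingConfig{Queues: 64})
	fmt.Println(m.Queuing.Queues) // 64
}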
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if 
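ResourcePolicyRule.Unmarshal above also shows how the remaining field shapes are handled: repeated strings such as Verbs, APIGroups, Resources, and Namespaces are appended one element per occurrence of their field on the wire, and the ClusterScope bool is decoded as a varint and compared against zero. A compact sketch of those two conventions (toy type, not the generated one):

package main

import "fmt"

// rule is a toy stand-in for ResourcePolicyRule: repeated protobuf strings
// arrive as one length-delimited field per element, bools as varints.
type rule struct {
	Verbs        []string
	ClusterScope bool
}

// addVerb mirrors "m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex]))":
// each wire occurrence of the field contributes exactly one element.
func (r *rule) addVerb(payload []byte) { r.Verbs = append(r.Verbs, string(payload)) }

// setClusterScope mirrors "m.ClusterScope = bool(v != 0)".
func (r *rule) setClusterScope(v uint64) { r.ClusterScope = v != 0 }

func main() {
	var r rule
	for _, p := range [][]byte{[]byte("get"), []byte("list"), []byte("watch")} {
		r.addVerb(p)
	}
	r.setClusterScope(1)
	fmt.Println(r.Verbs, r.ClusterScope) // [get list watch] true
}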
fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto new file mode 100644 index 000000000..7e367b47c --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -0,0 +1,440 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1beta2; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta2"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. 
If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. 
This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". 
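As a rough, editorial illustration of the ACV formula in the LimitedPriorityLevelConfiguration comment above, the following minimal Go sketch computes assured concurrency values from a hypothetical server concurrency limit and per-level shares (the level names and numbers are made up, not taken from any shipped configuration):

// Illustrative sketch only: derives ACV(l) = ceil(SCL * ACS(l) / sum(ACS))
// for a few hypothetical priority levels.
package main

import (
	"fmt"
	"math"
)

func assuredConcurrencyValues(scl int, acs map[string]int32) map[string]int {
	var sum int32
	for _, s := range acs {
		sum += s
	}
	acv := make(map[string]int, len(acs))
	for level, s := range acs {
		// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
		acv[level] = int(math.Ceil(float64(scl) * float64(s) / float64(sum)))
	}
	return acv
}

func main() {
	// With SCL=600 and shares 30/40/100 the levels get 106, 142 and 353.
	fmt.Println(assuredConcurrencyValues(600, map[string]int32{
		"workload-low": 30, "workload-high": 40, "leader-election": 100,
	}))
}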
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. 
Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. 
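To make the `queues`/`handSize` description in QueuingConfiguration above more concrete, here is a simplified Go sketch of the shuffle-sharding idea: hash the flow identifier, deal a small hand of queue indices, and enqueue into the shortest queue in that hand. This is an assumption-laden approximation for illustration, not the apiserver's actual dealing algorithm:

// Illustrative sketch only: simplified shuffle-sharding hand dealing.
package main

import (
	"fmt"
	"hash/fnv"
)

// dealHand picks handSize distinct queue indices out of nQueues,
// deterministically for a given flow identifier.
func dealHand(flowID string, nQueues, handSize int) []int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	v := h.Sum64()
	remaining := make([]int, nQueues)
	for i := range remaining {
		remaining[i] = i
	}
	hand := make([]int, 0, handSize)
	for i := 0; i < handSize; i++ {
		idx := int(v % uint64(len(remaining)))
		v /= uint64(len(remaining))
		hand = append(hand, remaining[idx])
		remaining = append(remaining[:idx], remaining[idx+1:]...)
	}
	return hand
}

func main() {
	// A request with this (hypothetical) flow identifier would go into the
	// shortest of these queues; other flows mostly land in different hands.
	fmt.Println(dealHand("example-flow-schema/alice", 64, 8))
}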
+ // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + optional string kind = 1; + + // `user` matches based on username. + // +optional + optional UserSubject user = 2; + + // `group` matches based on user group name. + // +optional + optional GroupSubject group = 3; + + // `serviceAccount` matches ServiceAccounts. + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/register.go b/vendor/k8s.io/api/flowcontrol/v1beta2/register.go new file mode 100644 index 000000000..0ffe55133 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go new file mode 100644 index 000000000..408681e99 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -0,0 +1,579 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped. 
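A minimal usage sketch for the register.go helpers just above: a consumer registers the flowcontrol.apiserver.k8s.io/v1beta2 kinds into a runtime.Scheme via AddToScheme. The surrounding program is hypothetical; only the registration call reflects the code in this patch:

// Illustrative sketch only: registering this group/version into a scheme.
package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := flowcontrolv1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes FlowSchema, FlowSchemaList,
	// PriorityLevelConfiguration and PriorityLevelConfigurationList.
	fmt.Println(scheme.Recognizes(flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchema")))
}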
+ // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. 
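Given the AutoUpdateAnnotationKey semantics described above, a cluster operator's tooling might pin the spec of a suggested configuration object by setting the annotation to 'false'. The sketch below is hypothetical (the object and the helper are invented for illustration); only the annotation key and its meaning come from the code above:

// Illustrative sketch only: opting a suggested FlowSchema out of auto-update.
package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)

func pinSpec(fs *flowcontrolv1beta2.FlowSchema) {
	if fs.Annotations == nil {
		fs.Annotations = map[string]string{}
	}
	// 'false' asks kube-apiserver not to reconcile the spec of this
	// suggested (non-mandatory) configuration object.
	fs.Annotations[flowcontrolv1beta2.AutoUpdateAnnotationKey] = "false"
}

func main() {
	fs := &flowcontrolv1beta2.FlowSchema{}
	pinSpec(fs)
	fmt.Println(fs.Annotations)
}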
+ PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. 
+type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. 
A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. 
+ NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. 
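With the FlowSchema, PolicyRulesWithSubjects, Subject and ResourcePolicyRule types now defined above, a short construction sketch shows how they compose. The schema name, priority level reference and rule contents here are hypothetical examples, not shipped defaults:

// Illustrative sketch only: building a FlowSchema value from the v1beta2 types.
package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fs := flowcontrolv1beta2.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-reads"},
		Spec: flowcontrolv1beta2.FlowSchemaSpec{
			PriorityLevelConfiguration: flowcontrolv1beta2.PriorityLevelConfigurationReference{
				Name: "workload-low",
			},
			// Lower numbers win among matching FlowSchemas; valid range is [1,10000].
			MatchingPrecedence: 5000,
			DistinguisherMethod: &flowcontrolv1beta2.FlowDistinguisherMethod{
				Type: flowcontrolv1beta2.FlowDistinguisherMethodByUserType,
			},
			Rules: []flowcontrolv1beta2.PolicyRulesWithSubjects{{
				Subjects: []flowcontrolv1beta2.Subject{{
					Kind:  flowcontrolv1beta2.SubjectKindGroup,
					Group: &flowcontrolv1beta2.GroupSubject{Name: "system:authenticated"},
				}},
				ResourceRules: []flowcontrolv1beta2.ResourcePolicyRule{{
					Verbs:      []string{"get", "list", "watch"},
					APIGroups:  []string{""},
					Resources:  []string{"configmaps"},
					Namespaces: []string{"*"},
				}},
			}},
		},
	}
	fmt.Println(fs.Name, fs.Spec.PriorityLevelConfiguration.Name)
}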
+// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. 
+ // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. 
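A companion sketch for the priority-level side: the +union shape of PriorityLevelConfigurationSpec means `Limited` is populated exactly when `Type` is "Limited", and `Queuing` exactly when the limit response type is "Queue". The object name and numeric values below are hypothetical:

// Illustrative sketch only: a Limited priority level with a queuing limit response.
package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	plc := flowcontrolv1beta2.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-level"},
		Spec: flowcontrolv1beta2.PriorityLevelConfigurationSpec{
			Type: flowcontrolv1beta2.PriorityLevelEnablementLimited,
			Limited: &flowcontrolv1beta2.LimitedPriorityLevelConfiguration{
				AssuredConcurrencyShares: 30,
				LimitResponse: flowcontrolv1beta2.LimitResponse{
					Type: flowcontrolv1beta2.LimitResponseTypeQueue,
					Queuing: &flowcontrolv1beta2.QueuingConfiguration{
						Queues:           64,
						HandSize:         8,
						QueueLengthLimit: 50,
					},
				},
			},
		},
	}
	fmt.Println(plc.Name, plc.Spec.Type)
}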
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go new file mode 100644 index 000000000..4775a8e99 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -0,0 +1,261 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. 
Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. 
Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. 
A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. 
Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..e6288a687 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,542 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. 
+func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
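Illustrative sketch, not part of the vendored files above: the deepcopy-gen functions in this file exist so callers can safely mutate API objects that come from shared informer caches. A brief sketch under that assumption; the function and variable names are illustrative, and the flowcontrol alias refers to the vendored k8s.io/api/flowcontrol/v1beta2 package.

package main

import flowcontrol "k8s.io/api/flowcontrol/v1beta2"

// bumpMatchingPrecedence shows the intended use of the generated DeepCopy:
// objects returned by listers are shared, so mutate only a deep copy.
func bumpMatchingPrecedence(cached *flowcontrol.FlowSchema) *flowcontrol.FlowSchema {
	updated := cached.DeepCopy()          // generated in zz_generated.deepcopy.go
	updated.Spec.MatchingPrecedence = 500 // safe: the shared cached object is untouched
	return updated
}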
+func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..00cefde41 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,94 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 26 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 29 +} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 8559e92d7..e98ab179e 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -167,16 +167,13 @@ message IngressClassParametersReference { // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. // +optional - // +featureGate=IngressClassNamespacedParams optional string scope = 4; // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams optional string namespace = 5; } diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 7edbcd597..c49110e5c 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -40,6 +40,7 @@ type NetworkPolicy struct { // PolicyType string describes the NetworkPolicy type // This type is beta-level in 1.8 +// +enum type PolicyType string const ( @@ -373,6 +374,7 @@ type HTTPIngressRuleValue struct { } // PathType represents the type of path referred to by a HTTPIngressPath. +// +enum type PathType string const ( @@ -534,15 +536,12 @@ type IngressClassParametersReference struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. // +optional - // +featureGate=IngressClassNamespacedParams Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". 
// +optional - // +featureGate=IngressClassNamespacedParams Namespace *string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 49b38bd04..38bd9c502 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -103,7 +103,7 @@ var map_IngressClassParametersReference = map[string]string{ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced.", "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". Field can be enabled with IngressClassNamespacedParams feature gate.", + "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 76e339aed..82677ca8b 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 5479ac482..ef8cceff4 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -154,16 +154,12 @@ message IngressClassParametersReference { // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. - // +optional - // +featureGate=IngressClassNamespacedParams optional string scope = 4; // Namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams optional string namespace = 5; } diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 4646e90b6..1bfdcd091 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -337,15 +337,11 @@ type IngressClassParametersReference struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". - // Field can be enabled with IngressClassNamespacedParams feature gate. - // +optional - // +featureGate=IngressClassNamespacedParams Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` // Namespace is the namespace of the resource being referenced. 
This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional - // +featureGate=IngressClassNamespacedParams Namespace *string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 88c130e89..79c42a6ba 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -94,7 +94,7 @@ var map_IngressClassParametersReference = map[string]string{ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced.", "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\". Field can be enabled with IngressClassNamespacedParams feature gate.", + "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index e1b4543d3..77259e368 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index 5e69fd5d9..e8b4c7ec7 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go index 35084da7e..c1424f045 100644 --- a/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 20f805183..966936262 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index c3989528b..e5034a587 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git 
a/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go index 2fda72ef5..860923d8f 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go index 9b7aac2f1..485e1c938 100644 --- a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 02d8a85cf..0a6239b87 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 8bda4b00f..612061d6c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index db8fd427c..2cf427f12 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -92,7 +92,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 038baf61b..067b6f15e 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -47,7 +47,7 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 664cf95f8..228ee54c0 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -80,7 +80,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 095a5e9c2..eab086899 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 9dea00e05..9795cffd9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -96,7 +96,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 57d993caa..13a0a1f0a 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -47,7 +47,7 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 8fc984ad5..46b8b9ee6 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -80,7 +80,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 0358227fa..9288bd017 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index fb975e2ea..53c252554 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -96,7 +96,7 @@ message ClusterRoleList { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index ad8391fd7..96e6b18f5 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -47,7 +47,7 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index eef80f834..5d57cb348 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -80,7 +80,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 7ffe58106..8abaa90c1 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go index a4d99b35a..3e38fff08 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go index 63bfe6404..e7ff18630 100644 --- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index 039282397..b130c990e 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6e2008578..ff38c9854 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git 
a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go index 45969d15c..15aefb108 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 9602c9098..de639354d 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -142,7 +142,7 @@ message CSIDriverSpec { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -154,8 +154,6 @@ message CSIDriverSpec { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is beta, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index d805e1539..6da0657ec 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -90,6 +90,7 @@ type StorageClassList struct { } // VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +// +enum type VolumeBindingMode string const ( @@ -341,7 +342,7 @@ type CSIDriverSpec struct { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -353,8 +354,6 @@ type CSIDriverSpec struct { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is beta, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index d3747dc86..ed5b18cb7 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -52,8 +52,8 @@ var map_CSIDriverSpec = map[string]string{ "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. 
If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. 
This field is beta, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", + "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", } diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index f4de94216..300f42cbb 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 64a34670b..d9bc94b25 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index 44311b4bb..41114c3c6 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index f72ca6b23..bfe8280d5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -144,7 +144,7 @@ message CSIDriverSpec { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -156,8 +156,6 @@ message CSIDriverSpec { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is beta, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. // diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 9fe5646ad..524d8b534 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -362,7 +362,7 @@ type CSIDriverSpec struct { // unset or false and it can be flipped later when storage // capacity information has been published. // - // This field is immutable. + // This field was immutable in Kubernetes <= 1.22 and now is mutable. // // This is a beta field and only available when the CSIStorageCapacity // feature is enabled. The default is false. @@ -374,8 +374,6 @@ type CSIDriverSpec struct { // Defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. - // This field is beta, and is only honored by servers - // that enable the CSIVolumeFSGroupPolicy feature gate. // // This field is immutable. 
// diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 9e1efa25b..f4dbf0fef 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -52,8 +52,8 @@ var map_CSIDriverSpec = map[string]string{ "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is beta, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", + "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. 
To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", } diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 9b7e675ee..5411ed8c0 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index 275b5d5db..7b69b2cbc 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 31eddfc1e..97e17be39 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -44,6 +44,28 @@ type APIStatus interface { var _ error = &StatusError{} +var knownReasons = map[metav1.StatusReason]struct{}{ + // metav1.StatusReasonUnknown : {} + metav1.StatusReasonUnauthorized: {}, + metav1.StatusReasonForbidden: {}, + metav1.StatusReasonNotFound: {}, + metav1.StatusReasonAlreadyExists: {}, + metav1.StatusReasonConflict: {}, + metav1.StatusReasonGone: {}, + metav1.StatusReasonInvalid: {}, + metav1.StatusReasonServerTimeout: {}, + metav1.StatusReasonTimeout: {}, + metav1.StatusReasonTooManyRequests: {}, + metav1.StatusReasonBadRequest: {}, + metav1.StatusReasonMethodNotAllowed: {}, + metav1.StatusReasonNotAcceptable: {}, + metav1.StatusReasonRequestEntityTooLarge: {}, + metav1.StatusReasonUnsupportedMediaType: {}, + metav1.StatusReasonInternalError: {}, + metav1.StatusReasonExpired: {}, + metav1.StatusReasonServiceUnavailable: {}, +} + // Error implements the Error interface. func (e *StatusError) Error() string { return e.ErrStatus.Message @@ -148,6 +170,25 @@ func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *Stat }} } +// NewGenerateNameConflict returns an error indicating the server +// was not able to generate a valid name for a resource. +func NewGenerateNameConflict(qualifiedResource schema.GroupResource, name string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf( + "%s %q already exists, the server was not able to generate a unique name for the object", + qualifiedResource.String(), name), + }} +} + // NewUnauthorized returns an error indicating the client is not authorized to perform the requested // action. 
func NewUnauthorized(reason string) *StatusError { @@ -248,7 +289,7 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis Field: err.Field, }) } - return &StatusError{metav1.Status{ + err := &StatusError{metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusUnprocessableEntity, Reason: metav1.StatusReasonInvalid, @@ -258,8 +299,14 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis Name: name, Causes: causes, }, - Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), }} + aggregatedErrs := errs.ToAggregate() + if aggregatedErrs == nil { + err.ErrStatus.Message = fmt.Sprintf("%s %q is invalid", qualifiedKind.String(), name) + } else { + err.ErrStatus.Message = fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, aggregatedErrs) + } + return err } // NewBadRequest creates an error that indicates that the request is invalid and can not be processed. @@ -478,7 +525,14 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. // It supports wrapped errors. func IsNotFound(err error) bool { - return ReasonForError(err) == metav1.StatusReasonNotFound + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonNotFound { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusNotFound { + return true + } + return false } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. @@ -490,19 +544,40 @@ func IsAlreadyExists(err error) bool { // IsConflict determines if the err is an error which indicates the provided update conflicts. // It supports wrapped errors. func IsConflict(err error) bool { - return ReasonForError(err) == metav1.StatusReasonConflict + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonConflict { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusConflict { + return true + } + return false } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. // It supports wrapped errors. func IsInvalid(err error) bool { - return ReasonForError(err) == metav1.StatusReasonInvalid + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonInvalid { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnprocessableEntity { + return true + } + return false } // IsGone is true if the error indicates the requested resource is no longer available. // It supports wrapped errors. func IsGone(err error) bool { - return ReasonForError(err) == metav1.StatusReasonGone + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonGone { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusGone { + return true + } + return false } // IsResourceExpired is true if the error indicates the resource has expired and the current action is @@ -515,77 +590,147 @@ func IsResourceExpired(err error) bool { // IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header // It supports wrapped errors. 
func IsNotAcceptable(err error) bool { - return ReasonForError(err) == metav1.StatusReasonNotAcceptable + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonNotAcceptable { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusNotAcceptable { + return true + } + return false } // IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header // It supports wrapped errors. func IsUnsupportedMediaType(err error) bool { - return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonUnsupportedMediaType { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnsupportedMediaType { + return true + } + return false } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. // It supports wrapped errors. func IsMethodNotSupported(err error) bool { - return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonMethodNotAllowed { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusMethodNotAllowed { + return true + } + return false } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. // It supports wrapped errors. func IsServiceUnavailable(err error) bool { - return ReasonForError(err) == metav1.StatusReasonServiceUnavailable + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonServiceUnavailable { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusServiceUnavailable { + return true + } + return false } // IsBadRequest determines if err is an error which indicates that the request is invalid. // It supports wrapped errors. func IsBadRequest(err error) bool { - return ReasonForError(err) == metav1.StatusReasonBadRequest + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonBadRequest { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusBadRequest { + return true + } + return false } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. // It supports wrapped errors. func IsUnauthorized(err error) bool { - return ReasonForError(err) == metav1.StatusReasonUnauthorized + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonUnauthorized { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusUnauthorized { + return true + } + return false } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. // It supports wrapped errors. func IsForbidden(err error) bool { - return ReasonForError(err) == metav1.StatusReasonForbidden + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonForbidden { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusForbidden { + return true + } + return false } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. // It supports wrapped errors. 
func IsTimeout(err error) bool { - return ReasonForError(err) == metav1.StatusReasonTimeout + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonTimeout { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusGatewayTimeout { + return true + } + return false } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. // It supports wrapped errors. func IsServerTimeout(err error) bool { + // do not check the status code, because no https status code exists that can + // be scoped to retryable timeouts. return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. // It supports wrapped errors. func IsInternalError(err error) bool { - return ReasonForError(err) == metav1.StatusReasonInternalError + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonInternalError { + return true + } + if _, ok := knownReasons[reason]; !ok && code == http.StatusInternalServerError { + return true + } + return false } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. // It supports wrapped errors. func IsTooManyRequests(err error) bool { - if ReasonForError(err) == metav1.StatusReasonTooManyRequests { + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonTooManyRequests { return true } - if status := APIStatus(nil); errors.As(err, &status) { - return status.Status().Code == http.StatusTooManyRequests + + // IsTooManyRequests' checking of code predates the checking of the code in + // the other Is* functions. In order to maintain backward compatibility, this + // does not check that the reason is unknown. + if code == http.StatusTooManyRequests { + return true } return false } @@ -594,11 +739,16 @@ func IsTooManyRequests(err error) bool { // the request entity is too large. // It supports wrapped errors. func IsRequestEntityTooLargeError(err error) bool { - if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge { + reason, code := reasonAndCodeForError(err) + if reason == metav1.StatusReasonRequestEntityTooLarge { return true } - if status := APIStatus(nil); errors.As(err, &status) { - return status.Status().Code == http.StatusRequestEntityTooLarge + + // IsRequestEntityTooLargeError's checking of code predates the checking of + // the code in the other Is* functions. In order to maintain backward + // compatibility, this does not check that the reason is unknown. + if code == http.StatusRequestEntityTooLarge { + return true } return false } @@ -653,6 +803,13 @@ func ReasonForError(err error) metav1.StatusReason { return metav1.StatusReasonUnknown } +func reasonAndCodeForError(err error) (metav1.StatusReason, int32) { + if status := APIStatus(nil); errors.As(err, &status) { + return status.Status().Reason, status.Status().Code + } + return metav1.StatusReasonUnknown, 0 +} + // ErrorReporter converts generic errors into runtime.Object errors without // requiring the caller to take a dependency on meta/v1 (where Status lives). // This prevents circular dependencies in core watch code. 
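// A minimal sketch, assuming the vendored k8s.io/apimachinery packages patched above, of the
// behaviour change in the errors.go Is* helpers: when a StatusError carries a reason that is
// not in the new knownReasons map, the helpers now fall back to matching the HTTP status code
// instead of returning false outright. This snippet is illustrative only and is not part of
// the vendored patch.
package main

import (
	"fmt"
	"net/http"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An error with an unknown reason but a 404 code: previously IsNotFound reported false,
	// with this change it reports true, because StatusReasonUnknown is not a known reason
	// and the code matches http.StatusNotFound.
	err := &apierrors.StatusError{ErrStatus: metav1.Status{
		Status: metav1.StatusFailure,
		Code:   http.StatusNotFound,
		Reason: metav1.StatusReasonUnknown,
	}}
	fmt.Println(apierrors.IsNotFound(err)) // true
}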
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go index fd2210022..1bc816fe3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -23,6 +23,10 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +var ( + _ ResettableRESTMapper = &FirstHitRESTMapper{} +) + // FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the // first successful result for the singular requests type FirstHitRESTMapper struct { @@ -75,6 +79,10 @@ func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) return nil, collapseAggregateErrors(errors) } +func (m FirstHitRESTMapper) Reset() { + m.MultiRESTMapper.Reset() +} + // collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list // by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) func collapseAggregateErrors(errors []error) error { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index 42eac3af0..a35ce3bd0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -132,3 +132,12 @@ type RESTMapper interface { ResourceSingularizer(resource string) (singular string, err error) } + +// ResettableRESTMapper is a RESTMapper which is capable of resetting itself +// from discovery. +// All rest mappers that delegate to other rest mappers must implement this interface and dynamically +// check if the delegate mapper supports the Reset() operation. +type ResettableRESTMapper interface { + RESTMapper + Reset() +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go index 431a0a635..a4298114b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -32,7 +32,7 @@ type lazyObject struct { mapper RESTMapper } -// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by // returning those initialization errors when the interface methods are invoked. This defers the // initialization and any server calls until a client actually needs to perform the action. 
func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { @@ -52,7 +52,7 @@ func (o *lazyObject) init() error { return o.err } -var _ RESTMapper = &lazyObject{} +var _ ResettableRESTMapper = &lazyObject{} func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { if err := o.init(); err != nil { @@ -102,3 +102,11 @@ func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err } return o.mapper.ResourceSingularizer(resource) } + +func (o *lazyObject) Reset() { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded && o.err == nil { + MaybeResetRESTMapper(o.mapper) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go index 6b01bf197..b7e971250 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -24,11 +24,15 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +var ( + _ ResettableRESTMapper = MultiRESTMapper{} +) + // MultiRESTMapper is a wrapper for multiple RESTMappers. type MultiRESTMapper []RESTMapper func (m MultiRESTMapper) String() string { - nested := []string{} + nested := make([]string, 0, len(m)) for _, t := range m { currString := fmt.Sprintf("%v", t) splitStrings := strings.Split(currString, "\n") @@ -208,3 +212,9 @@ func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ( } return allMappings, nil } + +func (m MultiRESTMapper) Reset() { + for _, t := range m { + MaybeResetRESTMapper(t) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go index fa11c580f..4f097c9c9 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -29,6 +29,10 @@ const ( AnyKind = "*" ) +var ( + _ ResettableRESTMapper = PriorityRESTMapper{} +) + // PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind // when multiple matches are possible type PriorityRESTMapper struct { @@ -220,3 +224,7 @@ func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.Group func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { return m.Delegate.KindsFor(partiallySpecifiedResource) } + +func (m PriorityRESTMapper) Reset() { + MaybeResetRESTMapper(m.Delegate) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 00bd86f51..f41b9bf78 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -519,3 +519,12 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } return mappings, nil } + +// MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper. 
+func MaybeResetRESTMapper(mapper RESTMapper) bool { + m, ok := mapper.(ResettableRESTMapper) + if ok { + m.Reset() + } + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 2e09f4fac..172db57fa 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -61,8 +61,32 @@ func (m *Quantity) XXX_DiscardUnknown() { var xxx_messageInfo_Quantity proto.InternalMessageInfo +func (m *QuantityValue) Reset() { *m = QuantityValue{} } +func (*QuantityValue) ProtoMessage() {} +func (*QuantityValue) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{1} +} +func (m *QuantityValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QuantityValue.Unmarshal(m, b) +} +func (m *QuantityValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QuantityValue.Marshal(b, m, deterministic) +} +func (m *QuantityValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuantityValue.Merge(m, src) +} +func (m *QuantityValue) XXX_Size() int { + return xxx_messageInfo_QuantityValue.Size(m) +} +func (m *QuantityValue) XXX_DiscardUnknown() { + xxx_messageInfo_QuantityValue.DiscardUnknown(m) +} + +var xxx_messageInfo_QuantityValue proto.InternalMessageInfo + func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") + proto.RegisterType((*QuantityValue)(nil), "k8s.io.apimachinery.pkg.api.resource.QuantityValue") } func init() { @@ -70,20 +94,21 @@ func init() { } var fileDescriptor_612bba87bd70906c = []byte{ - // 237 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, - 0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7, - 0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36, - 0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12, - 0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9, - 0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2, - 0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07, - 0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a, - 0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a, - 0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76, - 0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea, - 0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11, - 0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6, - 0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00, + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b, + 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6, + 0xa5, 0x16, 0x55, 
0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b, + 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4, + 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, + 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18, + 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 0x29, 0x24, 0xc6, 0xc5, + 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59, + 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b, + 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94, + 0xa6, 0x92, 0xa6, 0xdd, 0x49, 0xef, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, + 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x07, 0xcc, 0x5f, + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x76, 0x9f, 0x66, 0x4d, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 472104d54..54240b7b5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -86,3 +86,15 @@ message Quantity { optional string string = 1; } +// QuantityValue makes it possible to use a Quantity as value for a command +// line parameter. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +message QuantityValue { + optional string string = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 2395656cc..6d43868ba 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -460,17 +460,7 @@ func (q *Quantity) AsApproximateFloat64() float64 { return base } - // multiply by the appropriate exponential scale - switch q.Format { - case DecimalExponent, DecimalSI: - return base * math.Pow10(exponent) - default: - // fast path for exponents that can fit in 64 bits - if exponent > 0 && exponent < 7 { - return base * float64(int64(1)<<(exponent*10)) - } - return base * math.Pow(2, float64(exponent*10)) - } + return base * math.Pow10(exponent) } // AsInt64 returns a representation of the current value as an int64 if a fast conversion @@ -774,3 +764,30 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } + +// QuantityValue makes it possible to use a Quantity as value for a command +// line parameter. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +type QuantityValue struct { + Quantity +} + +// Set implements pflag.Value.Set and Go flag.Value.Set. +func (q *QuantityValue) Set(s string) error { + quantity, err := ParseQuantity(s) + if err != nil { + return err + } + q.Quantity = quantity + return nil +} + +// Type implements pflag.Value.Type. 
+func (q QuantityValue) Type() string { + return "quantity" +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index ab4740790..abb00f38e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -25,3 +26,20 @@ func (in *Quantity) DeepCopyInto(out *Quantity) { *out = in.DeepCopy() return } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuantityValue) DeepCopyInto(out *QuantityValue) { + *out = *in + out.Quantity = in.Quantity.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuantityValue. +func (in *QuantityValue) DeepCopy() *QuantityValue { + if in == nil { + return nil + } + out := new(QuantityValue) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index a9b28f244..6d212b846 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index d5e4fc680..6e1eac5c7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 326f68812..9e7924c12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1326,184 +1326,186 @@ func init() { } var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2829 bytes of a gzipped FileDescriptorProto + // 2859 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcb, 0x6f, 0x24, 0x47, 0xf9, 0xee, 0x19, 0x8f, 0x3d, 0xf3, 0x8d, 0xc7, 0x8f, 0x5a, 0xef, 0xef, 0x37, 0x6b, 0x84, 0xc7, 0xe9, 0xa0, 0x68, 0x03, 0xc9, 0x38, 0x5e, 0x42, 0xb4, 0xd9, 0x90, 0x80, 0xc7, 0xb3, 0xde, 0x98, 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x40, 0x88, 0x50, 0xda, 0xdd, 0xe5, 0x71, 0xe3, 0x9e, 0xee, 0x49, - 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x44, 0xe1, 0xc6, 0x09, 0x25, 0x82, 0xbf, - 0x80, 0x0b, 0xfc, 0x01, 0x48, 0xe4, 0x18, 0xc4, 0x25, 0x12, 0x68, 0x94, 0x98, 0x03, 0x47, 0xc4, - 0xd5, 0x17, 0x50, 0x3d, 0xba, 0xbb, 0x7a, 0x1e, 0xeb, 0x9e, 0xec, 0x12, 0x71, 0x9b, 0xfe, 0xde, - 0x55, 0xf5, 0xd5, 0xf7, 0xaa, 0x81, 0xdd, 0x93, 0xeb, 0xac, 0xee, 0x06, 0xeb, 0x27, 0xdd, 0x43, - 0x42, 0x7d, 0x12, 0x12, 0xb6, 0x7e, 0x4a, 0x7c, 0x27, 0xa0, 0xeb, 0x0a, 0x61, 0x75, 0xdc, 0xb6, - 0x65, 0x1f, 0xbb, 0x3e, 0xa1, 0xbd, 0xf5, 0xce, 0x49, 0x8b, 0x03, 0xd8, 0x7a, 0x9b, 0x84, 0xd6, - 0xfa, 0xe9, 0xc6, 0x7a, 0x8b, 0xf8, 0x84, 0x5a, 
0x21, 0x71, 0xea, 0x1d, 0x1a, 0x84, 0x01, 0xfa, - 0x82, 0xe4, 0xaa, 0xeb, 0x5c, 0xf5, 0xce, 0x49, 0x8b, 0x03, 0x58, 0x9d, 0x73, 0xd5, 0x4f, 0x37, - 0x56, 0x9e, 0x6e, 0xb9, 0xe1, 0x71, 0xf7, 0xb0, 0x6e, 0x07, 0xed, 0xf5, 0x56, 0xd0, 0x0a, 0xd6, - 0x05, 0xf3, 0x61, 0xf7, 0x48, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x19, 0x6b, 0x0a, 0xed, - 0xfa, 0xa1, 0xdb, 0x26, 0x83, 0x56, 0xac, 0x3c, 0x77, 0x11, 0x03, 0xb3, 0x8f, 0x49, 0xdb, 0x1a, - 0xe4, 0x33, 0xff, 0x94, 0x87, 0xe2, 0xe6, 0xfe, 0xce, 0x2d, 0x1a, 0x74, 0x3b, 0x68, 0x0d, 0xa6, - 0x7d, 0xab, 0x4d, 0xaa, 0xc6, 0x9a, 0x71, 0xb5, 0xd4, 0x98, 0xfb, 0xa0, 0x5f, 0x9b, 0x3a, 0xeb, - 0xd7, 0xa6, 0x5f, 0xb5, 0xda, 0x04, 0x0b, 0x0c, 0xf2, 0xa0, 0x78, 0x4a, 0x28, 0x73, 0x03, 0x9f, - 0x55, 0x73, 0x6b, 0xf9, 0xab, 0xe5, 0x6b, 0x2f, 0xd5, 0xb3, 0xac, 0xbf, 0x2e, 0x14, 0xdc, 0x93, - 0xac, 0xdb, 0x01, 0x6d, 0xba, 0xcc, 0x0e, 0x4e, 0x09, 0xed, 0x35, 0x16, 0x95, 0x96, 0xa2, 0x42, - 0x32, 0x1c, 0x6b, 0x40, 0x3f, 0x32, 0x60, 0xb1, 0x43, 0xc9, 0x11, 0xa1, 0x94, 0x38, 0x0a, 0x5f, - 0xcd, 0xaf, 0x19, 0x8f, 0x40, 0x6d, 0x55, 0xa9, 0x5d, 0xdc, 0x1f, 0x90, 0x8f, 0x87, 0x34, 0xa2, - 0xdf, 0x18, 0xb0, 0xc2, 0x08, 0x3d, 0x25, 0x74, 0xd3, 0x71, 0x28, 0x61, 0xac, 0xd1, 0xdb, 0xf2, - 0x5c, 0xe2, 0x87, 0x5b, 0x3b, 0x4d, 0xcc, 0xaa, 0xd3, 0x62, 0x1f, 0xbe, 0x96, 0xcd, 0xa0, 0x83, - 0x71, 0x72, 0x1a, 0xa6, 0xb2, 0x68, 0x65, 0x2c, 0x09, 0xc3, 0x0f, 0x30, 0xc3, 0x3c, 0x82, 0xb9, - 0xe8, 0x20, 0x6f, 0xbb, 0x2c, 0x44, 0xf7, 0x60, 0xa6, 0xc5, 0x3f, 0x58, 0xd5, 0x10, 0x06, 0xd6, - 0xb3, 0x19, 0x18, 0xc9, 0x68, 0xcc, 0x2b, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, 0x9a, 0xf9, 0xb3, - 0x69, 0x28, 0x6f, 0xee, 0xef, 0x60, 0xc2, 0x82, 0x2e, 0xb5, 0x49, 0x06, 0xa7, 0xb9, 0x0e, 0x73, - 0xcc, 0xf5, 0x5b, 0x5d, 0xcf, 0xa2, 0x1c, 0x5a, 0x9d, 0x11, 0x94, 0xcb, 0x8a, 0x72, 0xee, 0x40, - 0xc3, 0xe1, 0x14, 0x25, 0xba, 0x06, 0xc0, 0x25, 0xb0, 0x8e, 0x65, 0x13, 0xa7, 0x9a, 0x5b, 0x33, - 0xae, 0x16, 0x1b, 0x48, 0xf1, 0xc1, 0xab, 0x31, 0x06, 0x6b, 0x54, 0xe8, 0x71, 0x28, 0x08, 0x4b, - 0xab, 0x45, 0xa1, 0xa6, 0xa2, 0xc8, 0x0b, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x93, 0x30, 0xab, 0xbc, - 0xac, 0x5a, 0x12, 0x64, 0x0b, 0x8a, 0x6c, 0x36, 0x72, 0x83, 0x08, 0xcf, 0xd7, 0x77, 0xe2, 0xfa, - 0x8e, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0xb8, 0xbe, 0x83, 0x05, 0x06, 0xdd, 0x86, 0xc2, 0x29, 0xa1, - 0x87, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x29, 0xdb, 0x46, 0xdf, 0xe3, 0x2c, 0x8d, 0x12, 0x37, 0x4d, - 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0xe3, 0x80, 0x86, 0x62, 0x79, 0xd5, 0xc2, 0x5a, 0xfe, - 0x6a, 0xa9, 0x31, 0xcf, 0xd7, 0x7b, 0x10, 0x43, 0xb1, 0x46, 0xc1, 0xe9, 0x6d, 0x2b, 0x24, 0xad, - 0x80, 0xba, 0x84, 0x55, 0x67, 0x13, 0xfa, 0xad, 0x18, 0x8a, 0x35, 0x0a, 0xf4, 0x0d, 0x40, 0x2c, - 0x0c, 0xa8, 0xd5, 0x22, 0x6a, 0xa9, 0x2f, 0x5b, 0xec, 0xb8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, 0x75, - 0xe8, 0x60, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, 0xae, - 0xc3, 0x5c, 0x4b, 0xbb, 0x75, 0xca, 0x2f, 0xe2, 0xd3, 0xd6, 0x6f, 0x24, 0x4e, 0x51, 0x22, 0x02, - 0x25, 0xaa, 0x24, 0x45, 0xd1, 0x65, 0x23, 0xb3, 0xd3, 0x46, 0x36, 0x24, 0x9a, 0x34, 0x20, 0xc3, - 0x89, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x28, 0xde, 0xa0, 0xab, 0x5a, 0x4c, 0x33, 0xc4, 0xf6, - 0xcd, 0x8d, 0x89, 0x47, 0x17, 0x04, 0x82, 0xdc, 0xff, 0x44, 0x20, 0xb8, 0x51, 0xfc, 0xd5, 0x7b, - 0xb5, 0xa9, 0xb7, 0xff, 0xb6, 0x36, 0x65, 0xfe, 0xd2, 0x80, 0xb9, 0xcd, 0x4e, 0xc7, 0xeb, 0xed, - 0x75, 0x42, 0xb1, 0x00, 0x13, 0x66, 0x1c, 0xda, 0xc3, 0x5d, 0x5f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, - 0x0a, 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x8e, 0x02, 0x6a, 0x13, 0x75, 0xdd, 
0xe2, 0xfb, 0xb3, 0xcd, - 0x81, 0x58, 0xe2, 0xf8, 0x21, 0x1f, 0xb9, 0xc4, 0x73, 0x76, 0x2d, 0xdf, 0x6a, 0x11, 0xaa, 0x2e, - 0x47, 0xbc, 0xf5, 0xdb, 0x1a, 0x0e, 0xa7, 0x28, 0xcd, 0x7f, 0xe7, 0xa0, 0xb4, 0x15, 0xf8, 0x8e, - 0x1b, 0xaa, 0xcb, 0x15, 0xf6, 0x3a, 0x43, 0xc1, 0xe3, 0x4e, 0xaf, 0x43, 0xb0, 0xc0, 0xa0, 0xe7, - 0x61, 0x86, 0x85, 0x56, 0xd8, 0x65, 0xc2, 0x9e, 0x52, 0xe3, 0xb1, 0x28, 0x2c, 0x1d, 0x08, 0xe8, - 0x79, 0xbf, 0xb6, 0x10, 0x8b, 0x93, 0x20, 0xac, 0x18, 0xb8, 0xa7, 0x07, 0x87, 0x62, 0xa3, 0x9c, - 0x5b, 0x32, 0xed, 0x45, 0xf9, 0x23, 0x9f, 0x78, 0xfa, 0xde, 0x10, 0x05, 0x1e, 0xc1, 0x85, 0x4e, - 0x01, 0x79, 0x16, 0x0b, 0xef, 0x50, 0xcb, 0x67, 0x42, 0xd7, 0x1d, 0xb7, 0x4d, 0xd4, 0x85, 0xff, - 0x62, 0xb6, 0x13, 0xe7, 0x1c, 0x89, 0xde, 0xdb, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, 0x09, 0x98, - 0xa1, 0xc4, 0x62, 0x81, 0x5f, 0x2d, 0x88, 0xe5, 0xc7, 0x51, 0x19, 0x0b, 0x28, 0x56, 0x58, 0x1e, - 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0x85, 0xd7, 0x38, 0xa0, 0xed, 0x4a, 0x30, 0x8e, 0xf0, 0x66, - 0x1b, 0x2a, 0x5b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2b, 0x3e, 0xfd, 0x81, 0xff, 0x24, 0x0f, 0x95, - 0x26, 0xf1, 0x48, 0xa2, 0x6f, 0x1b, 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x27, 0xd4, 0x0d, 0x9c, 0x03, - 0x62, 0x07, 0xbe, 0xc3, 0x84, 0x0b, 0xe4, 0x1b, 0xff, 0xc7, 0xf7, 0xe6, 0xd6, 0x10, 0x16, 0x8f, - 0xe0, 0x40, 0x1e, 0x54, 0x3a, 0x54, 0xfc, 0x16, 0xfb, 0x25, 0x3d, 0xa4, 0x7c, 0xed, 0xcb, 0xd9, - 0x8e, 0x63, 0x5f, 0x67, 0x6d, 0x2c, 0x9d, 0xf5, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf, - 0xc3, 0x62, 0x40, 0x3b, 0xc7, 0x96, 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0x85, - 0x62, 0x63, 0x99, 0xd7, 0x11, 0x7b, 0x03, 0x38, 0x3c, 0x44, 0x8d, 0x5e, 0x83, 0xa5, 0x0e, 0x0d, - 0x3a, 0x56, 0x4b, 0xb8, 0xd4, 0x7e, 0xe0, 0xb9, 0x76, 0x4f, 0xb8, 0x50, 0xa9, 0xf1, 0xd4, 0x59, - 0xbf, 0xb6, 0xb4, 0x3f, 0x88, 0x3c, 0xef, 0xd7, 0x2e, 0x89, 0xad, 0xe3, 0x90, 0x04, 0x89, 0x87, - 0xc5, 0x68, 0x67, 0x58, 0x18, 0x77, 0x86, 0xe6, 0x0e, 0x14, 0x9b, 0x5d, 0xe5, 0xcf, 0x2f, 0x42, - 0xd1, 0x51, 0xbf, 0xd5, 0xce, 0x47, 0x17, 0x2b, 0xa6, 0x39, 0xef, 0xd7, 0x2a, 0xbc, 0x74, 0xac, - 0x47, 0x00, 0x1c, 0xb3, 0x98, 0x4f, 0x40, 0x51, 0x1c, 0x39, 0xbb, 0xb7, 0x81, 0x16, 0x21, 0x8f, - 0xad, 0xfb, 0x42, 0xca, 0x1c, 0xe6, 0x3f, 0xb5, 0x08, 0xb4, 0x07, 0x70, 0x8b, 0x84, 0xd1, 0xc1, - 0x6f, 0xc2, 0x42, 0x14, 0x86, 0xd3, 0xd9, 0xe1, 0xff, 0x95, 0xee, 0x05, 0x9c, 0x46, 0xe3, 0x41, - 0x7a, 0xf3, 0x75, 0x28, 0x89, 0x0c, 0xc2, 0xd3, 0x6f, 0x92, 0xea, 0x8d, 0x07, 0xa4, 0xfa, 0x28, - 0x7f, 0xe7, 0xc6, 0xe5, 0x6f, 0xcd, 0x5c, 0x0f, 0x2a, 0x92, 0x37, 0x2a, 0x6e, 0x32, 0x69, 0x78, - 0x0a, 0x8a, 0x91, 0x99, 0x4a, 0x4b, 0x5c, 0xd4, 0x46, 0x82, 0x70, 0x4c, 0xa1, 0x69, 0x3b, 0x86, - 0x54, 0x36, 0xcc, 0xa6, 0x4c, 0xab, 0x5c, 0x72, 0x0f, 0xae, 0x5c, 0x34, 0x4d, 0x3f, 0x84, 0xea, - 0xb8, 0x4a, 0xf8, 0x21, 0xf2, 0x75, 0x76, 0x53, 0xcc, 0x77, 0x0c, 0x58, 0xd4, 0x25, 0x65, 0x3f, - 0xbe, 0xec, 0x4a, 0x2e, 0xae, 0xd4, 0xb4, 0x1d, 0xf9, 0xb5, 0x01, 0xcb, 0xa9, 0xa5, 0x4d, 0x74, - 0xe2, 0x13, 0x18, 0xa5, 0x3b, 0x47, 0x7e, 0x02, 0xe7, 0xf8, 0x4b, 0x0e, 0x2a, 0xb7, 0xad, 0x43, - 0xe2, 0x1d, 0x10, 0x8f, 0xd8, 0x61, 0x40, 0xd1, 0x0f, 0xa0, 0xdc, 0xb6, 0x42, 0xfb, 0x58, 0x40, - 0xa3, 0xaa, 0xbe, 0x99, 0x2d, 0xd8, 0xa5, 0x24, 0xd5, 0x77, 0x13, 0x31, 0x37, 0xfd, 0x90, 0xf6, - 0x1a, 0x97, 0x94, 0x49, 0x65, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0x15, 0x13, 0xdf, 0x37, 0xdf, 0xea, - 0xf0, 0x92, 0x63, 0xf2, 0x0e, 0x30, 0x65, 0x02, 0x26, 0x6f, 0x76, 0x5d, 0x4a, 0xda, 0xc4, 0x0f, - 0x93, 0x56, 0x6c, 0x77, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xf2, 0x12, 0x2c, 0x0e, 0x1a, 0xcf, 0xe3, - 
0xcf, 0x09, 0xe9, 0xc9, 0xf3, 0xc2, 0xfc, 0x27, 0x5a, 0x86, 0xc2, 0xa9, 0xe5, 0x75, 0xd5, 0x6d, - 0xc4, 0xf2, 0xe3, 0x46, 0xee, 0xba, 0x61, 0xfe, 0xd6, 0x80, 0xea, 0x38, 0x43, 0xd0, 0xe7, 0x35, - 0x41, 0x8d, 0xb2, 0xb2, 0x2a, 0xff, 0x0a, 0xe9, 0x49, 0xa9, 0x37, 0xa1, 0x18, 0x74, 0x78, 0x3d, - 0x10, 0x50, 0x75, 0xea, 0x4f, 0x46, 0x27, 0xb9, 0xa7, 0xe0, 0xe7, 0xfd, 0xda, 0xe5, 0x94, 0xf8, - 0x08, 0x81, 0x63, 0x56, 0x1e, 0xa9, 0x85, 0x3d, 0x3c, 0x7b, 0xc4, 0x91, 0xfa, 0x9e, 0x80, 0x60, - 0x85, 0x31, 0xff, 0x60, 0xc0, 0xb4, 0x28, 0xa6, 0x5f, 0x87, 0x22, 0xdf, 0x3f, 0xc7, 0x0a, 0x2d, - 0x61, 0x57, 0xe6, 0x36, 0x8e, 0x73, 0xef, 0x92, 0xd0, 0x4a, 0xbc, 0x2d, 0x82, 0xe0, 0x58, 0x22, - 0xc2, 0x50, 0x70, 0x43, 0xd2, 0x8e, 0x0e, 0xf2, 0xe9, 0xb1, 0xa2, 0xd5, 0x10, 0xa1, 0x8e, 0xad, - 0xfb, 0x37, 0xdf, 0x0a, 0x89, 0xcf, 0x0f, 0x23, 0xb9, 0x1a, 0x3b, 0x5c, 0x06, 0x96, 0xa2, 0xcc, - 0x7f, 0x19, 0x10, 0xab, 0xe2, 0xce, 0xcf, 0x88, 0x77, 0x74, 0xdb, 0xf5, 0x4f, 0xd4, 0xb6, 0xc6, - 0xe6, 0x1c, 0x28, 0x38, 0x8e, 0x29, 0x46, 0xa5, 0x87, 0xdc, 0x64, 0xe9, 0x81, 0x2b, 0xb4, 0x03, - 0x3f, 0x74, 0xfd, 0xee, 0xd0, 0x6d, 0xdb, 0x52, 0x70, 0x1c, 0x53, 0xf0, 0x42, 0x84, 0x92, 0xb6, - 0xe5, 0xfa, 0xae, 0xdf, 0xe2, 0x8b, 0xd8, 0x0a, 0xba, 0x7e, 0x28, 0x32, 0xb2, 0x2a, 0x44, 0xf0, - 0x10, 0x16, 0x8f, 0xe0, 0x30, 0x7f, 0x3f, 0x0d, 0x65, 0xbe, 0xe6, 0x28, 0xcf, 0xbd, 0x00, 0x15, - 0x4f, 0xf7, 0x02, 0xb5, 0xf6, 0xcb, 0xca, 0x94, 0xf4, 0xbd, 0xc6, 0x69, 0x5a, 0xce, 0x2c, 0xea, - 0xa7, 0x98, 0x39, 0x97, 0x66, 0xde, 0xd6, 0x91, 0x38, 0x4d, 0xcb, 0xa3, 0xd7, 0x7d, 0x7e, 0x3f, - 0x54, 0x65, 0x12, 0x1f, 0xd1, 0x37, 0x39, 0x10, 0x4b, 0x1c, 0xda, 0x85, 0x4b, 0x96, 0xe7, 0x05, - 0xf7, 0x05, 0xb0, 0x11, 0x04, 0x27, 0x6d, 0x8b, 0x9e, 0x30, 0xd1, 0x08, 0x17, 0x1b, 0x9f, 0x53, - 0x2c, 0x97, 0x36, 0x87, 0x49, 0xf0, 0x28, 0xbe, 0x51, 0xc7, 0x36, 0x3d, 0xe1, 0xb1, 0x1d, 0xc3, - 0xf2, 0x00, 0x48, 0xdc, 0x72, 0xd5, 0x95, 0x3e, 0xab, 0xe4, 0x2c, 0xe3, 0x11, 0x34, 0xe7, 0x63, - 0xe0, 0x78, 0xa4, 0x44, 0x74, 0x03, 0xe6, 0xb9, 0x27, 0x07, 0xdd, 0x30, 0xaa, 0x3b, 0x0b, 0xe2, - 0xb8, 0xd1, 0x59, 0xbf, 0x36, 0x7f, 0x27, 0x85, 0xc1, 0x03, 0x94, 0x7c, 0x73, 0x3d, 0xb7, 0xed, - 0x86, 0xd5, 0x59, 0xc1, 0x12, 0x6f, 0xee, 0x6d, 0x0e, 0xc4, 0x12, 0x97, 0xf2, 0xc0, 0xe2, 0x45, - 0x1e, 0x68, 0xfe, 0x39, 0x0f, 0x48, 0x16, 0xca, 0x8e, 0xac, 0xa7, 0x64, 0x48, 0xe3, 0xd5, 0xbc, - 0x2a, 0xb4, 0x8d, 0x81, 0x6a, 0x5e, 0xd5, 0xd8, 0x11, 0x1e, 0xed, 0x42, 0x49, 0x86, 0x96, 0xe4, - 0xba, 0xac, 0x2b, 0xe2, 0xd2, 0x5e, 0x84, 0x38, 0xef, 0xd7, 0x56, 0x52, 0x6a, 0x62, 0x8c, 0xe8, - 0xb4, 0x12, 0x09, 0xe8, 0x1a, 0x80, 0xd5, 0x71, 0xf5, 0x59, 0x5b, 0x29, 0x99, 0xb8, 0x24, 0x5d, - 0x33, 0xd6, 0xa8, 0xd0, 0xcb, 0x30, 0x1d, 0x7e, 0xba, 0x6e, 0xa8, 0x28, 0x9a, 0x3d, 0xde, 0xfb, - 0x08, 0x09, 0x5c, 0xbb, 0xf0, 0x67, 0xc6, 0xcd, 0x52, 0x8d, 0x4c, 0xac, 0x7d, 0x3b, 0xc6, 0x60, - 0x8d, 0x0a, 0x7d, 0x0b, 0x8a, 0x47, 0xaa, 0x14, 0x15, 0x07, 0x93, 0x39, 0x44, 0x46, 0x05, 0xac, - 0x6c, 0xf7, 0xa3, 0x2f, 0x1c, 0x4b, 0x43, 0x5f, 0x81, 0x32, 0xeb, 0x1e, 0xc6, 0xd9, 0x5b, 0x9e, - 0x66, 0x9c, 0x2a, 0x0f, 0x12, 0x14, 0xd6, 0xe9, 0xcc, 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, - 0xfd, 0xdb, 0x93, 0x30, 0xcb, 0x52, 0x0d, 0x4e, 0x7c, 0x92, 0x91, 0x97, 0x45, 0x78, 0xee, 0x5e, - 0xbe, 0xe5, 0x07, 0xb2, 0x8d, 0x29, 0x24, 0xee, 0xf5, 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, - 0x0b, 0x84, 0x9f, 0xbe, 0x5f, 0x9b, 0x7a, 0xf7, 0xfd, 0xda, 0xd4, 0x7b, 0xef, 0xab, 0x62, 0xe1, - 0x1c, 0x00, 0xf6, 0x0e, 0xbf, 0x47, 0x6c, 0x19, 0x76, 0x33, 0x8d, 0xe4, 0xa2, 0x49, 0xb0, 0x18, - 0xc9, 0xe5, 0x06, 0x8a, 
0x3e, 0x0d, 0x87, 0x53, 0x94, 0x68, 0x1d, 0x4a, 0xf1, 0xb0, 0x4d, 0xf9, - 0xc7, 0x52, 0xe4, 0x6f, 0xf1, 0x44, 0x0e, 0x27, 0x34, 0xa9, 0x1c, 0x30, 0x7d, 0x61, 0x0e, 0x68, - 0x40, 0xbe, 0xeb, 0x3a, 0xaa, 0xd9, 0x7d, 0x26, 0xca, 0xc1, 0x77, 0x77, 0x9a, 0xe7, 0xfd, 0xda, - 0x63, 0xe3, 0x66, 0xdc, 0x61, 0xaf, 0x43, 0x58, 0xfd, 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x01, - 0x69, 0x66, 0xc2, 0x80, 0x74, 0x0d, 0xa0, 0x95, 0x8c, 0x0c, 0xe4, 0x7d, 0x8f, 0x1d, 0x51, 0x1b, - 0x15, 0x68, 0x54, 0x88, 0xc1, 0x92, 0xcd, 0xfb, 0x6a, 0xd5, 0xba, 0xb3, 0xd0, 0x6a, 0xcb, 0x21, - 0xe4, 0x64, 0x77, 0xe2, 0x8a, 0x52, 0xb3, 0xb4, 0x35, 0x28, 0x0c, 0x0f, 0xcb, 0x47, 0x01, 0x2c, - 0x39, 0xaa, 0x43, 0x4c, 0x94, 0x96, 0x26, 0x56, 0x7a, 0x99, 0x2b, 0x6c, 0x0e, 0x0a, 0xc2, 0xc3, - 0xb2, 0xd1, 0x77, 0x61, 0x25, 0x02, 0x0e, 0xb7, 0xe9, 0x22, 0x60, 0xe7, 0x1b, 0xab, 0x67, 0xfd, - 0xda, 0x4a, 0x73, 0x2c, 0x15, 0x7e, 0x80, 0x04, 0xe4, 0xc0, 0x8c, 0x27, 0x0b, 0xdc, 0xb2, 0x28, - 0x4a, 0xbe, 0x9a, 0x6d, 0x15, 0x89, 0xf7, 0xd7, 0xf5, 0xc2, 0x36, 0x1e, 0x97, 0xa8, 0x9a, 0x56, - 0xc9, 0x46, 0x6f, 0x41, 0xd9, 0xf2, 0xfd, 0x20, 0xb4, 0xe4, 0xe0, 0x60, 0x4e, 0xa8, 0xda, 0x9c, - 0x58, 0xd5, 0x66, 0x22, 0x63, 0xa0, 0x90, 0xd6, 0x30, 0x58, 0x57, 0x85, 0xee, 0xc3, 0x42, 0x70, - 0xdf, 0x27, 0x14, 0x93, 0x23, 0x42, 0x89, 0x6f, 0x13, 0x56, 0xad, 0x08, 0xed, 0xcf, 0x66, 0xd4, - 0x9e, 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x50, 0x0b, 0xaa, 0xf3, 0xd8, 0xea, 0x5b, 0x9e, - 0xfb, 0x7d, 0x42, 0x59, 0x75, 0x3e, 0x99, 0x13, 0x6f, 0xc7, 0x50, 0xac, 0x51, 0xf0, 0xe8, 0x67, - 0x7b, 0x5d, 0x16, 0x12, 0x39, 0xb4, 0x5f, 0x48, 0x47, 0xbf, 0xad, 0x04, 0x85, 0x75, 0x3a, 0xd4, - 0x85, 0x4a, 0x5b, 0xcf, 0x34, 0xd5, 0x25, 0xb1, 0xba, 0xeb, 0xd9, 0x56, 0x37, 0x9c, 0x0b, 0x93, - 0xc2, 0x27, 0x85, 0xc3, 0x69, 0x2d, 0x2b, 0xcf, 0x43, 0xf9, 0x53, 0xf6, 0x04, 0xbc, 0xa7, 0x18, - 0x3c, 0xc7, 0x89, 0x7a, 0x8a, 0x3f, 0xe6, 0x60, 0x3e, 0xbd, 0xfb, 0x03, 0x59, 0xb4, 0x90, 0x29, - 0x8b, 0x46, 0xdd, 0xab, 0x31, 0xf6, 0x9d, 0x21, 0x0a, 0xeb, 0xf9, 0xb1, 0x61, 0x5d, 0x45, 0xcf, - 0xe9, 0x87, 0x89, 0x9e, 0x75, 0x00, 0x5e, 0x9e, 0xd0, 0xc0, 0xf3, 0x08, 0x15, 0x81, 0xb3, 0xa8, - 0xde, 0x13, 0x62, 0x28, 0xd6, 0x28, 0x78, 0x11, 0x7d, 0xe8, 0x05, 0xf6, 0x89, 0xd8, 0x82, 0xe8, - 0xd2, 0x8b, 0x90, 0x59, 0x94, 0x45, 0x74, 0x63, 0x08, 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xbc, - 0x6f, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0x82, 0x89, 0x2e, 0xe5, 0x8d, 0xa1, 0x1e, 0xe8, 0x99, 0x49, - 0x2f, 0x6a, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, 0x83, 0xcc, 0xbf, 0x1a, 0x70, 0x65, 0xa4, 0xee, 0xcf, - 0xa0, 0x07, 0x7b, 0x23, 0xdd, 0x83, 0xbd, 0x90, 0x71, 0x78, 0x39, 0xca, 0xda, 0x31, 0x1d, 0xd9, - 0x2c, 0x14, 0xf6, 0x79, 0xed, 0x6b, 0xfe, 0xc2, 0x80, 0x39, 0xf1, 0x6b, 0x92, 0xc1, 0x6f, 0x2d, - 0xfd, 0x1c, 0x50, 0x7a, 0x84, 0x4f, 0x01, 0xef, 0x18, 0x90, 0x1e, 0xb9, 0xa2, 0x97, 0xa4, 0xff, - 0x1a, 0xf1, 0x4c, 0x74, 0x42, 0xdf, 0x7d, 0x71, 0x5c, 0x07, 0x79, 0x29, 0xd3, 0x70, 0xf1, 0x29, - 0x28, 0xe1, 0x20, 0x08, 0xf7, 0xad, 0xf0, 0x98, 0xf1, 0x85, 0x77, 0xf8, 0x0f, 0xb5, 0x37, 0x62, - 0xe1, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x57, 0xc6, 0x3e, 0xd1, 0xf0, 0x10, 0x60, 0xc7, - 0x5f, 0x6a, 0x45, 0xb1, 0x17, 0x26, 0x74, 0x58, 0xa3, 0xe2, 0xad, 0x5f, 0xea, 0x5d, 0x67, 0xb0, - 0xf5, 0x4b, 0x69, 0xc3, 0x69, 0x5a, 0xf3, 0x9f, 0x39, 0x50, 0x6f, 0x22, 0xff, 0x65, 0x8f, 0x7d, - 0x62, 0xe0, 0x45, 0x66, 0x3e, 0xfd, 0x22, 0x13, 0x3f, 0xbf, 0x68, 0x4f, 0x12, 0xf9, 0x07, 0x3f, - 0x49, 0xa0, 0xe7, 0xe2, 0x57, 0x0e, 0x19, 0xba, 0x56, 0xd3, 0xaf, 0x1c, 0xe7, 0xfd, 0xda, 0x9c, - 0x12, 0x9e, 0x7e, 0xf5, 0x78, 0x0d, 0x66, 0x1d, 
0x12, 0x5a, 0xae, 0x27, 0xdb, 0xb8, 0xcc, 0xb3, - 0x7f, 0x29, 0xac, 0x29, 0x59, 0x1b, 0x65, 0x6e, 0x93, 0xfa, 0xc0, 0x91, 0x40, 0x1e, 0x6d, 0xed, - 0xc0, 0x91, 0x5d, 0x48, 0x21, 0x89, 0xb6, 0x5b, 0x81, 0x43, 0xb0, 0xc0, 0x98, 0xef, 0x1a, 0x50, - 0x96, 0x92, 0xb6, 0xac, 0x2e, 0x23, 0x68, 0x23, 0x5e, 0x85, 0x3c, 0xee, 0x2b, 0xfa, 0x73, 0xd6, - 0x79, 0xbf, 0x56, 0x12, 0x64, 0xa2, 0x81, 0x19, 0xf1, 0x6c, 0x93, 0xbb, 0x60, 0x8f, 0x1e, 0x87, - 0x82, 0xb8, 0x3d, 0x6a, 0x33, 0x93, 0x77, 0x39, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x9c, 0x83, 0x4a, - 0x6a, 0x71, 0x19, 0x7a, 0x81, 0x78, 0xe2, 0x99, 0xcb, 0x30, 0x45, 0x1f, 0xff, 0x0a, 0xae, 0x72, - 0xcf, 0xcc, 0xc3, 0xe4, 0x9e, 0x6f, 0xc3, 0x8c, 0xcd, 0xf7, 0x28, 0xfa, 0x53, 0xc5, 0xc6, 0x24, - 0xc7, 0x29, 0x76, 0x37, 0xf1, 0x46, 0xf1, 0xc9, 0xb0, 0x12, 0x88, 0x6e, 0xc1, 0x12, 0x25, 0x21, - 0xed, 0x6d, 0x1e, 0x85, 0x84, 0xea, 0xbd, 0x7f, 0x21, 0xa9, 0xb8, 0xf1, 0x20, 0x01, 0x1e, 0xe6, - 0x31, 0x0f, 0x61, 0xee, 0x8e, 0x75, 0xe8, 0xc5, 0xaf, 0x59, 0x18, 0x2a, 0xae, 0x6f, 0x7b, 0x5d, - 0x87, 0xc8, 0x68, 0x1c, 0x45, 0xaf, 0xe8, 0xd2, 0xee, 0xe8, 0xc8, 0xf3, 0x7e, 0xed, 0x52, 0x0a, - 0x20, 0x9f, 0x6f, 0x70, 0x5a, 0x84, 0xe9, 0xc1, 0xf4, 0x67, 0xd8, 0x3d, 0x7e, 0x07, 0x4a, 0x49, - 0x7d, 0xff, 0x88, 0x55, 0x9a, 0x6f, 0x40, 0x91, 0x7b, 0x7c, 0xd4, 0x97, 0x5e, 0x50, 0xe2, 0xa4, - 0x0b, 0xa7, 0x5c, 0x96, 0xc2, 0xc9, 0x6c, 0x43, 0xe5, 0x6e, 0xc7, 0x79, 0xc8, 0xf7, 0xcc, 0x5c, - 0xe6, 0xac, 0x75, 0x0d, 0xe4, 0xff, 0x35, 0x78, 0x82, 0x90, 0x99, 0x5b, 0x4b, 0x10, 0x7a, 0xe2, - 0xd5, 0x86, 0xf9, 0x3f, 0x36, 0x00, 0xc4, 0xd4, 0xec, 0xe6, 0x29, 0xf1, 0xc3, 0x0c, 0xaf, 0xde, - 0x77, 0x61, 0x26, 0x90, 0xde, 0x24, 0xdf, 0x34, 0x27, 0x1c, 0xcd, 0xc6, 0x97, 0x40, 0xfa, 0x13, - 0x56, 0xc2, 0x1a, 0x57, 0x3f, 0xf8, 0x64, 0x75, 0xea, 0xc3, 0x4f, 0x56, 0xa7, 0x3e, 0xfa, 0x64, - 0x75, 0xea, 0xed, 0xb3, 0x55, 0xe3, 0x83, 0xb3, 0x55, 0xe3, 0xc3, 0xb3, 0x55, 0xe3, 0xa3, 0xb3, - 0x55, 0xe3, 0xe3, 0xb3, 0x55, 0xe3, 0xdd, 0xbf, 0xaf, 0x4e, 0xbd, 0x96, 0x3b, 0xdd, 0xf8, 0x4f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x18, 0xc5, 0x8c, 0x25, 0x27, 0x00, 0x00, + 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x8a, 0xc2, 0x8d, 0x13, 0x4a, 0x04, 0x7f, + 0x00, 0xe2, 0x02, 0x7f, 0x00, 0x12, 0x39, 0x06, 0x71, 0x89, 0x04, 0x1a, 0x25, 0xe6, 0xc0, 0x11, + 0x71, 0xf5, 0x05, 0x54, 0x8f, 0xee, 0xae, 0x9e, 0xc7, 0xba, 0x27, 0xbb, 0x44, 0xdc, 0xa6, 0xbf, + 0x77, 0x55, 0x7d, 0xf5, 0xbd, 0x6a, 0x60, 0xf7, 0xe4, 0x3a, 0xab, 0xbb, 0xc1, 0xfa, 0x49, 0xf7, + 0x90, 0x50, 0x9f, 0x84, 0x84, 0xad, 0x9f, 0x12, 0xdf, 0x09, 0xe8, 0xba, 0x42, 0x58, 0x1d, 0xb7, + 0x6d, 0xd9, 0xc7, 0xae, 0x4f, 0x68, 0x6f, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0xb6, 0xde, 0x26, 0xa1, + 0xb5, 0x7e, 0xba, 0xb1, 0xde, 0x22, 0x3e, 0xa1, 0x56, 0x48, 0x9c, 0x7a, 0x87, 0x06, 0x61, 0x80, + 0xbe, 0x20, 0xb9, 0xea, 0x3a, 0x57, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0x56, 0xe7, 0x5c, 0xf5, 0xd3, + 0x8d, 0x95, 0xa7, 0x5b, 0x6e, 0x78, 0xdc, 0x3d, 0xac, 0xdb, 0x41, 0x7b, 0xbd, 0x15, 0xb4, 0x82, + 0x75, 0xc1, 0x7c, 0xd8, 0x3d, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0xc6, 0x9a, 0x42, + 0xbb, 0x7e, 0xe8, 0xb6, 0xc9, 0xa0, 0x15, 0x2b, 0xcf, 0x5d, 0xc4, 0xc0, 0xec, 0x63, 0xd2, 0xb6, + 0x06, 0xf9, 0xcc, 0x3f, 0xe5, 0xa1, 0xb8, 0xb9, 0xbf, 0x73, 0x8b, 0x06, 0xdd, 0x0e, 0x5a, 0x83, + 0x69, 0xdf, 0x6a, 0x93, 0xaa, 0xb1, 0x66, 0x5c, 0x2d, 0x35, 0xe6, 0x3e, 0xe8, 0xd7, 0xa6, 0xce, + 0xfa, 0xb5, 0xe9, 0x57, 0xad, 0x36, 0xc1, 0x02, 0x83, 0x3c, 0x28, 0x9e, 0x12, 0xca, 0xdc, 0xc0, + 0x67, 0xd5, 0xdc, 0x5a, 0xfe, 0x6a, 0xf9, 0xda, 0x4b, 0xf5, 0x2c, 0xeb, 0xaf, 0x0b, 0x05, 
0xf7, + 0x24, 0xeb, 0x76, 0x40, 0x9b, 0x2e, 0xb3, 0x83, 0x53, 0x42, 0x7b, 0x8d, 0x45, 0xa5, 0xa5, 0xa8, + 0x90, 0x0c, 0xc7, 0x1a, 0xd0, 0x8f, 0x0c, 0x58, 0xec, 0x50, 0x72, 0x44, 0x28, 0x25, 0x8e, 0xc2, + 0x57, 0xf3, 0x6b, 0xc6, 0x23, 0x50, 0x5b, 0x55, 0x6a, 0x17, 0xf7, 0x07, 0xe4, 0xe3, 0x21, 0x8d, + 0xe8, 0xd7, 0x06, 0xac, 0x30, 0x42, 0x4f, 0x09, 0xdd, 0x74, 0x1c, 0x4a, 0x18, 0x6b, 0xf4, 0xb6, + 0x3c, 0x97, 0xf8, 0xe1, 0xd6, 0x4e, 0x13, 0xb3, 0xea, 0xb4, 0xd8, 0x87, 0xaf, 0x65, 0x33, 0xe8, + 0x60, 0x9c, 0x9c, 0x86, 0xa9, 0x2c, 0x5a, 0x19, 0x4b, 0xc2, 0xf0, 0x03, 0xcc, 0x30, 0x8f, 0x60, + 0x2e, 0x3a, 0xc8, 0xdb, 0x2e, 0x0b, 0xd1, 0x3d, 0x98, 0x69, 0xf1, 0x0f, 0x56, 0x35, 0x84, 0x81, + 0xf5, 0x6c, 0x06, 0x46, 0x32, 0x1a, 0xf3, 0xca, 0x9e, 0x19, 0xf1, 0xc9, 0xb0, 0x92, 0x66, 0xfe, + 0x6c, 0x1a, 0xca, 0x9b, 0xfb, 0x3b, 0x98, 0xb0, 0xa0, 0x4b, 0x6d, 0x92, 0xc1, 0x69, 0xae, 0xc3, + 0x1c, 0x73, 0xfd, 0x56, 0xd7, 0xb3, 0x28, 0x87, 0x56, 0x67, 0x04, 0xe5, 0xb2, 0xa2, 0x9c, 0x3b, + 0xd0, 0x70, 0x38, 0x45, 0x89, 0xae, 0x01, 0x70, 0x09, 0xac, 0x63, 0xd9, 0xc4, 0xa9, 0xe6, 0xd6, + 0x8c, 0xab, 0xc5, 0x06, 0x52, 0x7c, 0xf0, 0x6a, 0x8c, 0xc1, 0x1a, 0x15, 0x7a, 0x1c, 0x0a, 0xc2, + 0xd2, 0x6a, 0x51, 0xa8, 0xa9, 0x28, 0xf2, 0x82, 0x58, 0x06, 0x96, 0x38, 0xf4, 0x24, 0xcc, 0x2a, + 0x2f, 0xab, 0x96, 0x04, 0xd9, 0x82, 0x22, 0x9b, 0x8d, 0xdc, 0x20, 0xc2, 0xf3, 0xf5, 0x9d, 0xb8, + 0xbe, 0x23, 0xfc, 0x4e, 0x5b, 0xdf, 0x2b, 0xae, 0xef, 0x60, 0x81, 0x41, 0xb7, 0xa1, 0x70, 0x4a, + 0xe8, 0x21, 0xf7, 0x04, 0xee, 0x9a, 0x5f, 0xca, 0xb6, 0xd1, 0xf7, 0x38, 0x4b, 0xa3, 0xc4, 0x4d, + 0x13, 0x3f, 0xb1, 0x14, 0x82, 0xea, 0x00, 0xec, 0x38, 0xa0, 0xa1, 0x58, 0x5e, 0xb5, 0xb0, 0x96, + 0xbf, 0x5a, 0x6a, 0xcc, 0xf3, 0xf5, 0x1e, 0xc4, 0x50, 0xac, 0x51, 0x70, 0x7a, 0xdb, 0x0a, 0x49, + 0x2b, 0xa0, 0x2e, 0x61, 0xd5, 0xd9, 0x84, 0x7e, 0x2b, 0x86, 0x62, 0x8d, 0x02, 0x7d, 0x03, 0x10, + 0x0b, 0x03, 0x6a, 0xb5, 0x88, 0x5a, 0xea, 0xcb, 0x16, 0x3b, 0xae, 0x82, 0x58, 0xdd, 0x8a, 0x5a, + 0x1d, 0x3a, 0x18, 0xa2, 0xc0, 0x23, 0xb8, 0xcc, 0xdf, 0x19, 0xb0, 0xa0, 0xf9, 0x82, 0xf0, 0xbb, + 0xeb, 0x30, 0xd7, 0xd2, 0x6e, 0x9d, 0xf2, 0x8b, 0xf8, 0xb4, 0xf5, 0x1b, 0x89, 0x53, 0x94, 0x88, + 0x40, 0x89, 0x2a, 0x49, 0x51, 0x74, 0xd9, 0xc8, 0xec, 0xb4, 0x91, 0x0d, 0x89, 0x26, 0x0d, 0xc8, + 0x70, 0x22, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0x8a, 0x37, 0xe8, 0xaa, 0x16, 0xd3, 0x0c, 0xb1, + 0x7d, 0x73, 0x63, 0xe2, 0xd1, 0x05, 0x81, 0x20, 0xf7, 0x3f, 0x11, 0x08, 0x6e, 0x14, 0x7f, 0xf9, + 0x5e, 0x6d, 0xea, 0xed, 0xbf, 0xad, 0x4d, 0x99, 0xbf, 0x30, 0x60, 0x6e, 0xb3, 0xd3, 0xf1, 0x7a, + 0x7b, 0x9d, 0x50, 0x2c, 0xc0, 0x84, 0x19, 0x87, 0xf6, 0x70, 0xd7, 0x57, 0x0b, 0x05, 0x7e, 0xbf, + 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0xa3, 0x80, 0xda, 0x44, 0x5d, 0xb7, 0xf8, 0xfe, 0x6c, + 0x73, 0x20, 0x96, 0x38, 0x7e, 0xc8, 0x47, 0x2e, 0xf1, 0x9c, 0x5d, 0xcb, 0xb7, 0x5a, 0x84, 0xaa, + 0xcb, 0x11, 0x6f, 0xfd, 0xb6, 0x86, 0xc3, 0x29, 0x4a, 0xf3, 0xdf, 0x39, 0x28, 0x6d, 0x05, 0xbe, + 0xe3, 0x86, 0xea, 0x72, 0x85, 0xbd, 0xce, 0x50, 0xf0, 0xb8, 0xd3, 0xeb, 0x10, 0x2c, 0x30, 0xe8, + 0x79, 0x98, 0x61, 0xa1, 0x15, 0x76, 0x99, 0xb0, 0xa7, 0xd4, 0x78, 0x2c, 0x0a, 0x4b, 0x07, 0x02, + 0x7a, 0xde, 0xaf, 0x2d, 0xc4, 0xe2, 0x24, 0x08, 0x2b, 0x06, 0xee, 0xe9, 0xc1, 0xa1, 0xd8, 0x28, + 0xe7, 0x96, 0x4c, 0x7b, 0x51, 0xfe, 0xc8, 0x27, 0x9e, 0xbe, 0x37, 0x44, 0x81, 0x47, 0x70, 0xa1, + 0x53, 0x40, 0x9e, 0xc5, 0xc2, 0x3b, 0xd4, 0xf2, 0x99, 0xd0, 0x75, 0xc7, 0x6d, 0x13, 0x75, 0xe1, + 0xbf, 0x98, 0xed, 0xc4, 0x39, 0x47, 0xa2, 0xf7, 0xf6, 0x90, 0x34, 0x3c, 0x42, 0x03, 0x7a, 0x02, + 0x66, 0x28, 0xb1, 
0x58, 0xe0, 0x57, 0x0b, 0x62, 0xf9, 0x71, 0x54, 0xc6, 0x02, 0x8a, 0x15, 0x96, + 0x07, 0xb4, 0x36, 0x61, 0xcc, 0x6a, 0x45, 0xe1, 0x35, 0x0e, 0x68, 0xbb, 0x12, 0x8c, 0x23, 0xbc, + 0xf9, 0x5b, 0x03, 0x2a, 0x5b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2d, 0x3e, 0xf5, 0x89, 0xa3, 0x4d, + 0x58, 0x10, 0xdf, 0xf7, 0x2c, 0xcf, 0x75, 0xe4, 0x19, 0x4c, 0x0b, 0xe6, 0xff, 0x57, 0xcc, 0x0b, + 0xdb, 0x69, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x87, 0x4a, 0x93, 0x78, 0x24, 0x31, 0x79, 0x1b, + 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x27, 0xd4, 0x0d, 0x9c, 0x03, 0x62, 0x07, 0xbe, 0xc3, 0x84, 0x1b, + 0xe5, 0x1b, 0xff, 0xc7, 0xf7, 0xf7, 0xd6, 0x10, 0x16, 0x8f, 0xe0, 0x40, 0x1e, 0x54, 0x3a, 0x54, + 0xfc, 0x16, 0x7b, 0x2e, 0xbd, 0xac, 0x7c, 0xed, 0xcb, 0xd9, 0x8e, 0x74, 0x5f, 0x67, 0x6d, 0x2c, + 0x9d, 0xf5, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf, 0xc3, 0x62, 0x40, 0x3b, 0xc7, 0x96, + 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0xc8, 0x62, 0x63, 0x99, 0xd7, 0x22, 0x7b, + 0x03, 0x38, 0x3c, 0x44, 0x8d, 0x5e, 0x83, 0xa5, 0x0e, 0x0d, 0x3a, 0x56, 0x4b, 0x6c, 0xcc, 0x7e, + 0xe0, 0xb9, 0x76, 0x4f, 0x6d, 0xe7, 0x53, 0x67, 0xfd, 0xda, 0xd2, 0xfe, 0x20, 0xf2, 0xbc, 0x5f, + 0xbb, 0x24, 0xb6, 0x8e, 0x43, 0x12, 0x24, 0x1e, 0x16, 0xa3, 0xb9, 0x41, 0x61, 0x9c, 0x1b, 0x98, + 0x3b, 0x50, 0x6c, 0x76, 0xd5, 0x9d, 0x78, 0x11, 0x8a, 0x8e, 0xfa, 0xad, 0x76, 0x3e, 0xba, 0x9c, + 0x31, 0xcd, 0x79, 0xbf, 0x56, 0xe1, 0xe5, 0x67, 0x3d, 0x02, 0xe0, 0x98, 0xc5, 0x7c, 0x02, 0x8a, + 0xe2, 0xe0, 0xd9, 0xbd, 0x0d, 0xb4, 0x08, 0x79, 0x6c, 0xdd, 0x17, 0x52, 0xe6, 0x30, 0xff, 0xa9, + 0x45, 0xb1, 0x3d, 0x80, 0x5b, 0x24, 0x8c, 0x0e, 0x7e, 0x13, 0x16, 0xa2, 0x50, 0x9e, 0xce, 0x30, + 0xb1, 0x37, 0xe1, 0x34, 0x1a, 0x0f, 0xd2, 0x9b, 0xaf, 0x43, 0x49, 0x64, 0x21, 0x9e, 0xc2, 0x93, + 0x72, 0xc1, 0x78, 0x40, 0xb9, 0x10, 0xd5, 0x00, 0xb9, 0x71, 0x35, 0x80, 0x66, 0xae, 0x07, 0x15, + 0xc9, 0x1b, 0x15, 0x48, 0x99, 0x34, 0x3c, 0x05, 0xc5, 0xc8, 0x4c, 0xa5, 0x25, 0x2e, 0x8c, 0x23, + 0x41, 0x38, 0xa6, 0xd0, 0xb4, 0x1d, 0x43, 0x2a, 0xa3, 0x66, 0x53, 0xa6, 0x55, 0x3f, 0xb9, 0x07, + 0x57, 0x3f, 0x9a, 0xa6, 0x1f, 0x42, 0x75, 0x5c, 0x35, 0xfd, 0x10, 0x39, 0x3f, 0xbb, 0x29, 0xe6, + 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xb2, 0x1f, 0x5f, 0x76, 0x25, 0x17, 0x57, 0x7b, 0xda, 0x8e, 0xfc, + 0xca, 0x80, 0xe5, 0xd4, 0xd2, 0x26, 0x3a, 0xf1, 0x09, 0x8c, 0xd2, 0x9d, 0x23, 0x3f, 0x81, 0x73, + 0xfc, 0x25, 0x07, 0x95, 0xdb, 0xd6, 0x21, 0xf1, 0x0e, 0x88, 0x47, 0xec, 0x30, 0xa0, 0xe8, 0x07, + 0x50, 0x6e, 0x5b, 0xa1, 0x7d, 0x2c, 0xa0, 0x51, 0x67, 0xd0, 0xcc, 0x16, 0xec, 0x52, 0x92, 0xea, + 0xbb, 0x89, 0x98, 0x9b, 0x7e, 0x48, 0x7b, 0x8d, 0x4b, 0xca, 0xa4, 0xb2, 0x86, 0xc1, 0xba, 0x36, + 0xd1, 0xce, 0x89, 0xef, 0x9b, 0x6f, 0x75, 0x78, 0xd9, 0x32, 0x79, 0x17, 0x99, 0x32, 0x01, 0x93, + 0x37, 0xbb, 0x2e, 0x25, 0x6d, 0xe2, 0x87, 0x49, 0x3b, 0xb7, 0x3b, 0x20, 0x1f, 0x0f, 0x69, 0x5c, + 0x79, 0x09, 0x16, 0x07, 0x8d, 0xe7, 0xf1, 0xe7, 0x84, 0xf4, 0xe4, 0x79, 0x61, 0xfe, 0x13, 0x2d, + 0x43, 0xe1, 0xd4, 0xf2, 0xba, 0xea, 0x36, 0x62, 0xf9, 0x71, 0x23, 0x77, 0xdd, 0x30, 0x7f, 0x63, + 0x40, 0x75, 0x9c, 0x21, 0xe8, 0xf3, 0x9a, 0xa0, 0x46, 0x59, 0x59, 0x95, 0x7f, 0x85, 0xf4, 0xa4, + 0xd4, 0x9b, 0x50, 0x0c, 0x3a, 0xbc, 0xa6, 0x08, 0xa8, 0x3a, 0xf5, 0x27, 0xa3, 0x93, 0xdc, 0x53, + 0xf0, 0xf3, 0x7e, 0xed, 0x72, 0x4a, 0x7c, 0x84, 0xc0, 0x31, 0x2b, 0x8f, 0xd4, 0xc2, 0x1e, 0x9e, + 0x3d, 0xe2, 0x48, 0x7d, 0x4f, 0x40, 0xb0, 0xc2, 0x98, 0x7f, 0x30, 0x60, 0x5a, 0x14, 0xe4, 0xaf, + 0x43, 0x91, 0xef, 0x9f, 0x63, 0x85, 0x96, 0xb0, 0x2b, 0x73, 0x2b, 0xc8, 0xb9, 0x77, 0x49, 0x68, + 0x25, 0xde, 0x16, 0x41, 0x70, 0x2c, 0x11, 
0x61, 0x28, 0xb8, 0x21, 0x69, 0x47, 0x07, 0xf9, 0xf4, + 0x58, 0xd1, 0x6a, 0x10, 0x51, 0xc7, 0xd6, 0xfd, 0x9b, 0x6f, 0x85, 0xc4, 0xe7, 0x87, 0x91, 0x5c, + 0x8d, 0x1d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x55, 0x71, 0xe7, 0x67, 0xc4, 0x3b, + 0xba, 0xed, 0xfa, 0x27, 0x6a, 0x5b, 0x63, 0x73, 0x0e, 0x14, 0x1c, 0xc7, 0x14, 0xa3, 0xd2, 0x43, + 0x6e, 0xb2, 0xf4, 0xc0, 0x15, 0xda, 0x81, 0x1f, 0xba, 0x7e, 0x77, 0xe8, 0xb6, 0x6d, 0x29, 0x38, + 0x8e, 0x29, 0x78, 0x21, 0x42, 0x49, 0xdb, 0x72, 0x7d, 0xd7, 0x6f, 0xf1, 0x45, 0x6c, 0x05, 0x5d, + 0x3f, 0x14, 0x19, 0x59, 0x15, 0x22, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xbf, 0x9f, 0x86, 0x32, + 0x5f, 0x73, 0x94, 0xe7, 0x5e, 0x80, 0x8a, 0xa7, 0x7b, 0x81, 0x5a, 0xfb, 0x65, 0x65, 0x4a, 0xfa, + 0x5e, 0xe3, 0x34, 0x2d, 0x67, 0x16, 0x25, 0x54, 0xcc, 0x9c, 0x4b, 0x33, 0x6f, 0xeb, 0x48, 0x9c, + 0xa6, 0xe5, 0xd1, 0xeb, 0x3e, 0xbf, 0x1f, 0xaa, 0x32, 0x89, 0x8f, 0xe8, 0x9b, 0x1c, 0x88, 0x25, + 0x0e, 0xed, 0xc2, 0x25, 0xcb, 0xf3, 0x82, 0xfb, 0x02, 0xd8, 0x08, 0x82, 0x93, 0xb6, 0x45, 0x4f, + 0x98, 0x68, 0xa6, 0x8b, 0x8d, 0xcf, 0x29, 0x96, 0x4b, 0x9b, 0xc3, 0x24, 0x78, 0x14, 0xdf, 0xa8, + 0x63, 0x9b, 0x9e, 0xf0, 0xd8, 0x8e, 0x61, 0x79, 0x00, 0x24, 0x6e, 0xb9, 0xea, 0x6c, 0x9f, 0x55, + 0x72, 0x96, 0xf1, 0x08, 0x9a, 0xf3, 0x31, 0x70, 0x3c, 0x52, 0x22, 0xba, 0x01, 0xf3, 0xdc, 0x93, + 0x83, 0x6e, 0x18, 0xd5, 0x9d, 0x05, 0x71, 0xdc, 0xe8, 0xac, 0x5f, 0x9b, 0xbf, 0x93, 0xc2, 0xe0, + 0x01, 0x4a, 0xbe, 0xb9, 0x9e, 0xdb, 0x76, 0xc3, 0xea, 0xac, 0x60, 0x89, 0x37, 0xf7, 0x36, 0x07, + 0x62, 0x89, 0x4b, 0x79, 0x60, 0xf1, 0x22, 0x0f, 0x34, 0xff, 0x9c, 0x07, 0x24, 0x6b, 0x6d, 0x47, + 0xd6, 0x53, 0x32, 0xa4, 0xf1, 0x8e, 0x40, 0xd5, 0xea, 0xc6, 0x40, 0x47, 0xa0, 0xca, 0xf4, 0x08, + 0x8f, 0x76, 0xa1, 0x24, 0x43, 0x4b, 0x72, 0x5d, 0xd6, 0x15, 0x71, 0x69, 0x2f, 0x42, 0x9c, 0xf7, + 0x6b, 0x2b, 0x29, 0x35, 0x31, 0x46, 0x74, 0x6b, 0x89, 0x04, 0x74, 0x0d, 0xc0, 0xea, 0xb8, 0xfa, + 0xbc, 0xae, 0x94, 0x4c, 0x6d, 0x92, 0xce, 0x1b, 0x6b, 0x54, 0xe8, 0x65, 0x98, 0x0e, 0x3f, 0x5d, + 0x47, 0x55, 0x14, 0x0d, 0x23, 0xef, 0x9f, 0x84, 0x04, 0xae, 0x5d, 0xf8, 0x33, 0xe3, 0x66, 0xa9, + 0x66, 0x28, 0xd6, 0xbe, 0x1d, 0x63, 0xb0, 0x46, 0x85, 0xbe, 0x05, 0xc5, 0x23, 0x55, 0x8a, 0x8a, + 0x83, 0xc9, 0x1c, 0x22, 0xa3, 0x02, 0x56, 0x8e, 0x0c, 0xa2, 0x2f, 0x1c, 0x4b, 0x43, 0x5f, 0x81, + 0x32, 0xeb, 0x1e, 0xc6, 0xd9, 0x5b, 0x9e, 0x66, 0x9c, 0x2a, 0x0f, 0x12, 0x14, 0xd6, 0xe9, 0xcc, + 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, 0x3d, 0xe0, 0x93, 0x30, 0xcb, 0x52, 0x0d, 0x4e, 0x7c, + 0x92, 0x91, 0x97, 0x45, 0x78, 0xee, 0x5e, 0xbe, 0xe5, 0x07, 0xb2, 0x8d, 0x29, 0x24, 0xee, 0xf5, + 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, 0x0b, 0x84, 0x9f, 0xbe, 0x5f, 0x9b, 0x7a, 0xf7, 0xfd, + 0xda, 0xd4, 0x7b, 0xef, 0xab, 0x62, 0xe1, 0x1c, 0x00, 0xf6, 0x0e, 0xbf, 0x47, 0x6c, 0x19, 0x76, + 0x33, 0x8d, 0xf5, 0xa2, 0x69, 0xb2, 0x18, 0xeb, 0xe5, 0x06, 0x8a, 0x3e, 0x0d, 0x87, 0x53, 0x94, + 0x68, 0x1d, 0x4a, 0xf1, 0xc0, 0x4e, 0xf9, 0xc7, 0x52, 0xe4, 0x6f, 0xf1, 0x54, 0x0f, 0x27, 0x34, + 0xa9, 0x1c, 0x30, 0x7d, 0x61, 0x0e, 0x68, 0x40, 0xbe, 0xeb, 0x3a, 0xaa, 0x61, 0x7e, 0x26, 0xca, + 0xc1, 0x77, 0x77, 0x9a, 0xe7, 0xfd, 0xda, 0x63, 0xe3, 0xe6, 0xe4, 0x61, 0xaf, 0x43, 0x58, 0xfd, + 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x01, 0x69, 0x66, 0xc2, 0x80, 0x74, 0x0d, 0xa0, 0x95, 0x8c, + 0x1d, 0xe4, 0x7d, 0x8f, 0x1d, 0x51, 0x1b, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x92, 0xcd, 0x5b, 0x73, + 0xd5, 0xfe, 0xb3, 0xd0, 0x6a, 0xcb, 0x41, 0xe6, 0x64, 0x77, 0xe2, 0x8a, 0x52, 0xb3, 0xb4, 0x35, + 0x28, 0x0c, 0x0f, 0xcb, 0x47, 0x01, 0x2c, 0x39, 0xaa, 0x43, 0x4c, 
0x94, 0x96, 0x26, 0x56, 0x7a, + 0x99, 0x2b, 0x6c, 0x0e, 0x0a, 0xc2, 0xc3, 0xb2, 0xd1, 0x77, 0x61, 0x25, 0x02, 0x0e, 0xb7, 0xe9, + 0x22, 0x60, 0xe7, 0x1b, 0xab, 0x67, 0xfd, 0xda, 0x4a, 0x73, 0x2c, 0x15, 0x7e, 0x80, 0x04, 0xe4, + 0xc0, 0x8c, 0x27, 0x0b, 0xdc, 0xb2, 0x28, 0x4a, 0xbe, 0x9a, 0x6d, 0x15, 0x89, 0xf7, 0xd7, 0xf5, + 0xc2, 0x36, 0x1e, 0xb9, 0xa8, 0x9a, 0x56, 0xc9, 0x46, 0x6f, 0x41, 0xd9, 0xf2, 0xfd, 0x20, 0xb4, + 0xe4, 0xe0, 0x60, 0x4e, 0xa8, 0xda, 0x9c, 0x58, 0xd5, 0x66, 0x22, 0x63, 0xa0, 0x90, 0xd6, 0x30, + 0x58, 0x57, 0x85, 0xee, 0xc3, 0x42, 0x70, 0xdf, 0x27, 0x14, 0x93, 0x23, 0x42, 0x89, 0x6f, 0x13, + 0x56, 0xad, 0x08, 0xed, 0xcf, 0x66, 0xd4, 0x9e, 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x50, + 0x0b, 0xaa, 0xf3, 0xd8, 0xea, 0x5b, 0x9e, 0xfb, 0x7d, 0x42, 0x59, 0x75, 0x3e, 0x99, 0x35, 0x6f, + 0xc7, 0x50, 0xac, 0x51, 0xf0, 0xe8, 0x67, 0x7b, 0x5d, 0x16, 0x12, 0x39, 0xf8, 0x5f, 0x48, 0x47, + 0xbf, 0xad, 0x04, 0x85, 0x75, 0x3a, 0xd4, 0x85, 0x4a, 0x5b, 0xcf, 0x34, 0xd5, 0x25, 0xb1, 0xba, + 0xeb, 0xd9, 0x56, 0x37, 0x9c, 0x0b, 0x93, 0xc2, 0x27, 0x85, 0xc3, 0x69, 0x2d, 0x2b, 0xcf, 0x43, + 0xf9, 0x53, 0xf6, 0x04, 0xbc, 0xa7, 0x18, 0x3c, 0xc7, 0x89, 0x7a, 0x8a, 0x3f, 0xe6, 0x60, 0x3e, + 0xbd, 0xfb, 0x03, 0x59, 0xb4, 0x90, 0x29, 0x8b, 0x46, 0xdd, 0xab, 0x31, 0xf6, 0xad, 0x22, 0x0a, + 0xeb, 0xf9, 0xb1, 0x61, 0x5d, 0x45, 0xcf, 0xe9, 0x87, 0x89, 0x9e, 0x75, 0x00, 0x5e, 0x9e, 0xd0, + 0xc0, 0xf3, 0x08, 0x15, 0x81, 0xb3, 0xa8, 0xde, 0x24, 0x62, 0x28, 0xd6, 0x28, 0x78, 0x11, 0x7d, + 0xe8, 0x05, 0xf6, 0x89, 0xd8, 0x82, 0xe8, 0xd2, 0x8b, 0x90, 0x59, 0x94, 0x45, 0x74, 0x63, 0x08, + 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xbc, 0x6f, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0x82, 0x89, 0x2e, + 0xe5, 0x8d, 0xa1, 0x1e, 0xe8, 0x99, 0x49, 0x2f, 0x6a, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, 0x83, 0xcc, + 0xbf, 0x1a, 0x70, 0x65, 0xa4, 0xee, 0xcf, 0xa0, 0x07, 0x7b, 0x23, 0xdd, 0x83, 0xbd, 0x90, 0x71, + 0x78, 0x39, 0xca, 0xda, 0x31, 0x1d, 0xd9, 0x2c, 0x14, 0xf6, 0x79, 0xed, 0x6b, 0x7e, 0x68, 0xc0, + 0x9c, 0xf8, 0x35, 0xc9, 0xec, 0xb8, 0x96, 0x7e, 0x52, 0x28, 0x3d, 0xba, 0xe7, 0x84, 0x47, 0x31, + 0x5c, 0x7e, 0xc7, 0x80, 0xf4, 0xd4, 0x16, 0xbd, 0x24, 0xaf, 0x80, 0x11, 0x8f, 0x55, 0x27, 0x74, + 0xff, 0x17, 0xc7, 0x35, 0xa1, 0x97, 0x32, 0xcd, 0x27, 0x9f, 0x82, 0x12, 0x0e, 0x82, 0x70, 0xdf, + 0x0a, 0x8f, 0x19, 0xdf, 0xbb, 0x0e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, + 0x7f, 0x6e, 0xc0, 0x95, 0xb1, 0x2f, 0x45, 0x3c, 0x8a, 0xd8, 0xf1, 0x97, 0x5a, 0x51, 0xec, 0xc8, + 0x09, 0x1d, 0xd6, 0xa8, 0x78, 0xf7, 0x98, 0x7a, 0x5e, 0x1a, 0xec, 0x1e, 0x53, 0xda, 0x70, 0x9a, + 0xd6, 0xfc, 0x67, 0x0e, 0xd4, 0xd3, 0xcc, 0x7f, 0xd9, 0xe9, 0x9f, 0x18, 0x78, 0x18, 0x9a, 0x4f, + 0x3f, 0x0c, 0xc5, 0xaf, 0x40, 0xda, 0xcb, 0x48, 0xfe, 0xc1, 0x2f, 0x23, 0xe8, 0xb9, 0xf8, 0xb1, + 0x45, 0xfa, 0xd0, 0x6a, 0xfa, 0xb1, 0xe5, 0xbc, 0x5f, 0x9b, 0x53, 0xc2, 0xd3, 0x8f, 0x2f, 0xaf, + 0xc1, 0xac, 0x43, 0x42, 0xcb, 0xf5, 0x64, 0x27, 0x98, 0xf9, 0xf9, 0x40, 0x0a, 0x6b, 0x4a, 0xd6, + 0x46, 0x99, 0xdb, 0xa4, 0x3e, 0x70, 0x24, 0x90, 0x07, 0x6c, 0x3b, 0x70, 0x64, 0x23, 0x53, 0x48, + 0x02, 0xf6, 0x56, 0xe0, 0x10, 0x2c, 0x30, 0xe6, 0xbb, 0x06, 0x94, 0xa5, 0xa4, 0x2d, 0xab, 0xcb, + 0x08, 0xda, 0x88, 0x57, 0x21, 0x8f, 0xfb, 0x8a, 0xfe, 0xaa, 0x76, 0xde, 0xaf, 0x95, 0x04, 0x99, + 0xe8, 0x81, 0x46, 0xbc, 0x1e, 0xe5, 0x2e, 0xd8, 0xa3, 0xc7, 0xa1, 0x20, 0x2e, 0x90, 0xda, 0xcc, + 0xe4, 0x79, 0x90, 0x03, 0xb1, 0xc4, 0x99, 0x1f, 0xe7, 0xa0, 0x92, 0x5a, 0x5c, 0x86, 0x76, 0x22, + 0x1e, 0x9a, 0xe6, 0x32, 0x0c, 0xe2, 0xc7, 0x3f, 0xc6, 0xab, 0xf4, 0x35, 0xf3, 0x30, 0xe9, 
0xeb, + 0xdb, 0x30, 0x63, 0xf3, 0x3d, 0x8a, 0xfe, 0xdb, 0xb1, 0x31, 0xc9, 0x71, 0x8a, 0xdd, 0x4d, 0xbc, + 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x5b, 0xb0, 0x44, 0x49, 0x48, 0x7b, 0x9b, 0x47, 0x21, 0xa1, + 0xfa, 0xf8, 0xa0, 0x90, 0x14, 0xed, 0x78, 0x90, 0x00, 0x0f, 0xf3, 0x98, 0x87, 0x30, 0x77, 0xc7, + 0x3a, 0xf4, 0xe2, 0x07, 0x31, 0x0c, 0x15, 0xd7, 0xb7, 0xbd, 0xae, 0x43, 0x64, 0x40, 0x8f, 0xa2, + 0x57, 0x74, 0x69, 0x77, 0x74, 0xe4, 0x79, 0xbf, 0x76, 0x29, 0x05, 0x90, 0x2f, 0x40, 0x38, 0x2d, + 0xc2, 0xf4, 0x60, 0xfa, 0x33, 0x6c, 0x40, 0xbf, 0x03, 0xa5, 0xa4, 0x45, 0x78, 0xc4, 0x2a, 0xcd, + 0x37, 0xa0, 0xc8, 0x3d, 0x3e, 0x6a, 0x6d, 0x2f, 0xa8, 0x92, 0xd2, 0xb5, 0x57, 0x2e, 0x4b, 0xed, + 0x25, 0x9e, 0x55, 0xef, 0x76, 0x9c, 0x87, 0x7c, 0x56, 0xcd, 0x3d, 0x4c, 0xe6, 0xcb, 0x4f, 0x98, + 0xf9, 0xae, 0x81, 0xfc, 0xeb, 0x09, 0x4f, 0x32, 0xb2, 0x80, 0xd0, 0x92, 0x8c, 0x9e, 0xff, 0xb5, + 0x37, 0x85, 0x1f, 0x1b, 0x00, 0x62, 0x78, 0x77, 0xf3, 0x94, 0xf8, 0x61, 0x86, 0x07, 0xfc, 0xbb, + 0x30, 0x13, 0x48, 0x8f, 0x94, 0x4f, 0xab, 0x13, 0x4e, 0x88, 0xe3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, + 0x61, 0x8d, 0xab, 0x1f, 0x7c, 0xb2, 0x3a, 0xf5, 0xe1, 0x27, 0xab, 0x53, 0x1f, 0x7d, 0xb2, 0x3a, + 0xf5, 0xf6, 0xd9, 0xaa, 0xf1, 0xc1, 0xd9, 0xaa, 0xf1, 0xe1, 0xd9, 0xaa, 0xf1, 0xd1, 0xd9, 0xaa, + 0xf1, 0xf1, 0xd9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0xa7, 0x5e, 0xcb, 0x9d, 0x6e, 0xfc, 0x27, 0x00, + 0x00, 0xff, 0xff, 0x7e, 0xef, 0x1e, 0xdd, 0xf0, 0x27, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -1909,6 +1911,11 @@ func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x22 i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -2982,6 +2989,11 @@ func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x22 i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -3382,6 +3394,11 @@ func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FieldValidation) + copy(dAtA[i:], m.FieldValidation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldValidation))) + i-- + dAtA[i] = 0x1a i -= len(m.FieldManager) copy(dAtA[i:], m.FieldManager) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) @@ -3648,6 +3665,8 @@ func (m *CreateOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4069,6 +4088,8 @@ func (m *PatchOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4227,6 +4248,8 @@ func (m *UpdateOptions) Size() (n int) { } l = len(m.FieldManager) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldValidation) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4371,6 +4394,7 @@ func (this *CreateOptions) String() string { s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + 
fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -4634,6 +4658,7 @@ func (this *PatchOptions) String() string { `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `Force:` + valueToStringGenerated(this.Force) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -4756,6 +4781,7 @@ func (this *UpdateOptions) String() string { s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `FieldValidation:` + fmt.Sprintf("%v", this.FieldValidation) + `,`, `}`, }, "") return s @@ -6097,6 +6123,38 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9824,6 +9882,38 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11145,6 +11235,38 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } m.FieldManager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldValidation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldValidation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto 
b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 7d450644d..472fcacb1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -242,6 +242,19 @@ message CreateOptions { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional optional string fieldManager = 3; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 4; } // DeleteOptions may be provided when deleting an API object. @@ -362,7 +375,7 @@ message GroupVersionForDiscovery { } // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { @@ -374,7 +387,7 @@ message GroupVersionKind { } // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { @@ -878,6 +891,19 @@ message PatchOptions { // types (JsonPatch, MergePatch, StrategicMergePatch). // +optional optional string fieldManager = 3; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 4; } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -1095,6 +1121,19 @@ message UpdateOptions { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional optional string fieldManager = 2; + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. 
+ // - Strict: fails the request on unknown/duplicate fields. + // +optional + optional string fieldValidation = 3; } // Verbs masks the value so protobuf can generate diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index 54a0944af..fc9e521e2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -44,7 +44,7 @@ func (gr *GroupResource) String() string { } // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionResource struct { @@ -80,7 +80,7 @@ func (gk *GroupKind) String() string { } // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionKind struct { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go index befab16f7..3cf9d48e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go index 94ad8d7cf..bf9e21b5b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 9660282c4..f9c27c146 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -522,6 +522,15 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } +const ( + // FieldValidationIgnore ignores unknown/duplicate fields + FieldValidationIgnore = "Ignore" + // FieldValidationWarn responds with a warning, but successfully serve the request + FieldValidationWarn = "Warn" + // FieldValidationStrict fails the request on unknown/duplicate fields + FieldValidationStrict = "Strict" +) + // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -544,6 +553,19 @@ type CreateOptions struct { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. 
+ // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } // +k8s:conversion-gen:explicit-from=net/url.Values @@ -577,6 +599,19 @@ type PatchOptions struct { // types (JsonPatch, MergePatch, StrategicMergePatch). // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } // ApplyOptions may be provided when applying an API object. @@ -632,6 +667,19 @@ type UpdateOptions struct { // as defined by https://golang.org/pkg/unicode/#IsPrint. // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` + + // fieldValidation determines how the server should respond to + // unknown/duplicate fields in the object in the request. + // Introduced as alpha in 1.23, older servers or servers with the + // `ServerSideFieldValidation` feature disabled will discard valid values + // specified in this param and not perform any server side field validation. + // Valid values are: + // - Ignore: ignores unknown/duplicate fields. + // - Warn: responds with a warning for each + // unknown/duplicate field, but successfully serves the request. + // - Strict: fails the request on unknown/duplicate fields. + // +optional + FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,3,name=fieldValidation"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 3eae04d07..088ff01f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -112,9 +112,10 @@ func (Condition) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -302,10 +303,11 @@ func (Patch) SwaggerDoc() map[string]string { } var map_PatchOptions = map[string]string{ - "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. 
- Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -447,9 +449,10 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. 
- Strict: fails the request on unknown/duplicate fields.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 7b101ea51..d26c6cff4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -382,7 +382,7 @@ func (unstructuredJSONScheme) Identifier() runtime.Identifier { func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { type detector struct { - Items gojson.RawMessage + Items gojson.RawMessage `json:"items"` } var det detector if err := json.Unmarshal(data, &det); err != nil { @@ -425,7 +425,7 @@ func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstru func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { type decodeList struct { - Items []gojson.RawMessage + Items []gojson.RawMessage `json:"items"` } var dList decodeList diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 9a9f25e8f..fe8250dd6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index bd82a9d65..4c09898b8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -96,17 +96,19 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { } func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs } func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs } func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { @@ -123,6 +125,7 @@ func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchTyp } allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) 
allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) return allErrs } @@ -159,6 +162,18 @@ func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { return allErrs } +var allowedFieldValidationValues = sets.NewString("", metav1.FieldValidationIgnore, metav1.FieldValidationWarn, metav1.FieldValidationStrict) + +// ValidateFieldValidation validates that a fieldValidation query param only contains allowed values. +func ValidateFieldValidation(fldPath *field.Path, fieldValidation string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedFieldValidationValues.Has(fieldValidation) { + allErrs = append(allErrs, field.NotSupported(fldPath, fieldValidation, allowedFieldValidationValues.List())) + } + return allErrs + +} + const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` // ValidateTableOptions returns any invalid flags on TableOptions. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index 3ecb67c82..b7590f0b3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -293,6 +294,13 @@ func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptio } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } @@ -448,6 +456,13 @@ func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } @@ -496,6 +511,13 @@ func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptio } else { out.FieldManager = "" } + if values, ok := map[string][]string(*in)["fieldValidation"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldValidation, s); err != nil { + return err + } + } else { + out.FieldValidation = "" + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index d43020da5..418e6099f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go index cce2e603a..dac177e93 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git 
a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 89053b981..972b7f03e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go index 73e63fc11..198b5be4a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 791348476..ed51a25e3 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -44,23 +44,16 @@ type Converter struct { generatedConversionFuncs ConversionFuncs // Set of conversions that should be treated as a no-op - ignoredConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{} - - // nameFunc is called to retrieve the name of a type; this name is used for the - // purpose of deciding whether two types match or not (i.e., will we attempt to - // do a conversion). The default returns the go type name. - nameFunc func(t reflect.Type) string } // NewConverter creates a new Converter object. -func NewConverter(nameFn NameFunc) *Converter { +// Arg NameFunc is just for backward compatibility. +func NewConverter(NameFunc) *Converter { c := &Converter{ conversionFuncs: NewConversionFuncs(), generatedConversionFuncs: NewConversionFuncs(), - ignoredConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}), - nameFunc: nameFn, } c.RegisterUntypedConversionFunc( (*[]byte)(nil), (*[]byte)(nil), @@ -192,7 +185,6 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { if typeTo.Kind() != reflect.Ptr { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } - c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 4d482947f..fdf4c31e1 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 4a6cc6857..b99492a89 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -22,6 +22,7 @@ import ( "math" "os" "reflect" + "sort" "strconv" "strings" "sync" @@ -109,21 +110,141 @@ type unstructuredConverter struct { // to Go types via reflection. It performs mismatch detection automatically and is intended for use by external // test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. 
func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return NewTestUnstructuredConverterWithValidation(comparison) +} + +// NewTestUnstrucutredConverterWithValidation allows for access to +// FromUnstructuredWithValidation from within tests. +func NewTestUnstructuredConverterWithValidation(comparison conversion.Equalities) *unstructuredConverter { return &unstructuredConverter{ mismatchDetection: true, comparison: comparison, } } -// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// fromUnstructuredContext provides options for informing the converter +// the state of its recursive walk through the conversion process. +type fromUnstructuredContext struct { + // isInlined indicates whether the converter is currently in + // an inlined field or not to determine whether it should + // validate the matchedKeys yet or only collect them. + // This should only be set from `structFromUnstructured` + isInlined bool + // matchedKeys is a stack of the set of all fields that exist in the + // concrete go type of the object being converted into. + // This should only be manipulated via `pushMatchedKeyTracker`, + // `recordMatchedKey`, or `popAndVerifyMatchedKeys` + matchedKeys []map[string]struct{} + // parentPath collects the path that the conversion + // takes as it traverses the unstructured json map. + // It is used to report the full path to any unknown + // fields that the converter encounters. + parentPath []string + // returnUnknownFields indicates whether or not + // unknown field errors should be collected and + // returned to the caller + returnUnknownFields bool + // unknownFieldErrors are the collection of + // the full path to each unknown field in the + // object. + unknownFieldErrors []error +} + +// pushMatchedKeyTracker adds a placeholder set for tracking +// matched keys for the given level. This should only be +// called from `structFromUnstructured`. +func (c *fromUnstructuredContext) pushMatchedKeyTracker() { + if !c.returnUnknownFields { + return + } + + c.matchedKeys = append(c.matchedKeys, nil) +} + +// recordMatchedKey initializes the last element of matchedKeys +// (if needed) and sets 'key'. This should only be called from +// `structFromUnstructured`. +func (c *fromUnstructuredContext) recordMatchedKey(key string) { + if !c.returnUnknownFields { + return + } + + last := len(c.matchedKeys) - 1 + if c.matchedKeys[last] == nil { + c.matchedKeys[last] = map[string]struct{}{} + } + c.matchedKeys[last][key] = struct{}{} +} + +// popAndVerifyMatchedKeys pops the last element of matchedKeys, +// checks the matched keys against the data, and adds unknown +// field errors for any matched keys. +// `mapValue` is the value of sv containing all of the keys that exist at this level +// (ie. sv.MapKeys) in the source data. +// `matchedKeys` are all the keys found for that level in the destination object. +// This should only be called from `structFromUnstructured`. 
+func (c *fromUnstructuredContext) popAndVerifyMatchedKeys(mapValue reflect.Value) { + if !c.returnUnknownFields { + return + } + + last := len(c.matchedKeys) - 1 + curMatchedKeys := c.matchedKeys[last] + c.matchedKeys[last] = nil + c.matchedKeys = c.matchedKeys[:last] + for _, key := range mapValue.MapKeys() { + if _, ok := curMatchedKeys[key.String()]; !ok { + c.recordUnknownField(key.String()) + } + } +} + +func (c *fromUnstructuredContext) recordUnknownField(field string) { + if !c.returnUnknownFields { + return + } + + pathLen := len(c.parentPath) + c.pushKey(field) + errPath := strings.Join(c.parentPath, "") + c.parentPath = c.parentPath[:pathLen] + c.unknownFieldErrors = append(c.unknownFieldErrors, fmt.Errorf(`unknown field "%s"`, errPath)) +} + +func (c *fromUnstructuredContext) pushIndex(index int) { + if !c.returnUnknownFields { + return + } + + c.parentPath = append(c.parentPath, "[", strconv.Itoa(index), "]") +} + +func (c *fromUnstructuredContext) pushKey(key string) { + if !c.returnUnknownFields { + return + } + + if len(c.parentPath) > 0 { + c.parentPath = append(c.parentPath, ".") + } + c.parentPath = append(c.parentPath, key) + +} + +// FromUnstructuredWIthValidation converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. -func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { +// It takes a validationDirective that indicates how to behave when it encounters unknown fields. +func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) } - err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + + fromUnstructuredContext := &fromUnstructuredContext{ + returnUnknownFields: returnUnknownFields, + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem(), fromUnstructuredContext) if c.mismatchDetection { newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) @@ -134,7 +255,23 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } - return err + if err != nil { + return err + } + if returnUnknownFields && len(fromUnstructuredContext.unknownFieldErrors) > 0 { + sort.Slice(fromUnstructuredContext.unknownFieldErrors, func(i, j int) bool { + return fromUnstructuredContext.unknownFieldErrors[i].Error() < + fromUnstructuredContext.unknownFieldErrors[j].Error() + }) + return NewStrictDecodingError(fromUnstructuredContext.unknownFieldErrors) + } + return nil +} + +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. 
+func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { + return c.FromUnstructuredWithValidation(u, obj, false) } func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { @@ -145,7 +282,7 @@ func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { return json.Unmarshal(data, obj) } -func fromUnstructured(sv, dv reflect.Value) error { +func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { sv = unwrapInterface(sv) if !sv.IsValid() { dv.Set(reflect.Zero(dv.Type())) @@ -213,18 +350,19 @@ func fromUnstructured(sv, dv reflect.Value) error { switch dt.Kind() { case reflect.Map: - return mapFromUnstructured(sv, dv) + return mapFromUnstructured(sv, dv, ctx) case reflect.Slice: - return sliceFromUnstructured(sv, dv) + return sliceFromUnstructured(sv, dv, ctx) case reflect.Ptr: - return pointerFromUnstructured(sv, dv) + return pointerFromUnstructured(sv, dv, ctx) case reflect.Struct: - return structFromUnstructured(sv, dv) + return structFromUnstructured(sv, dv, ctx) case reflect.Interface: return interfaceFromUnstructured(sv, dv) default: return fmt.Errorf("unrecognized type: %v", dt.Kind()) } + } func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { @@ -275,7 +413,7 @@ func unwrapInterface(v reflect.Value) reflect.Value { return v } -func mapFromUnstructured(sv, dv reflect.Value) error { +func mapFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() != reflect.Map { return fmt.Errorf("cannot restore map from %v", st.Kind()) @@ -293,7 +431,7 @@ func mapFromUnstructured(sv, dv reflect.Value) error { for _, key := range sv.MapKeys() { value := reflect.New(dt.Elem()).Elem() if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { - if err := fromUnstructured(val, value); err != nil { + if err := fromUnstructured(val, value, ctx); err != nil { return err } } else { @@ -308,7 +446,7 @@ func mapFromUnstructured(sv, dv reflect.Value) error { return nil } -func sliceFromUnstructured(sv, dv reflect.Value) error { +func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { // We store original []byte representation as string. 
@@ -340,15 +478,22 @@ func sliceFromUnstructured(sv, dv reflect.Value) error { return nil } dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + + pathLen := len(ctx.parentPath) + defer func() { + ctx.parentPath = ctx.parentPath[:pathLen] + }() for i := 0; i < sv.Len(); i++ { - if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + ctx.pushIndex(i) + if err := fromUnstructured(sv.Index(i), dv.Index(i), ctx); err != nil { return err } + ctx.parentPath = ctx.parentPath[:pathLen] } return nil } -func pointerFromUnstructured(sv, dv reflect.Value) error { +func pointerFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() == reflect.Ptr && sv.IsNil() { @@ -358,38 +503,63 @@ func pointerFromUnstructured(sv, dv reflect.Value) error { dv.Set(reflect.New(dt.Elem())) switch st.Kind() { case reflect.Ptr, reflect.Interface: - return fromUnstructured(sv.Elem(), dv.Elem()) + return fromUnstructured(sv.Elem(), dv.Elem(), ctx) default: - return fromUnstructured(sv, dv.Elem()) + return fromUnstructured(sv, dv.Elem(), ctx) } } -func structFromUnstructured(sv, dv reflect.Value) error { +func structFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() if st.Kind() != reflect.Map { return fmt.Errorf("cannot restore struct from: %v", st.Kind()) } + pathLen := len(ctx.parentPath) + svInlined := ctx.isInlined + defer func() { + ctx.parentPath = ctx.parentPath[:pathLen] + ctx.isInlined = svInlined + }() + if !svInlined { + ctx.pushMatchedKeyTracker() + } for i := 0; i < dt.NumField(); i++ { fieldInfo := fieldInfoFromField(dt, i) fv := dv.Field(i) if len(fieldInfo.name) == 0 { - // This field is inlined. - if err := fromUnstructured(sv, fv); err != nil { + // This field is inlined, recurse into fromUnstructured again + // with the same set of matched keys. + ctx.isInlined = true + if err := fromUnstructured(sv, fv, ctx); err != nil { return err } + ctx.isInlined = svInlined } else { + // This field is not inlined so we recurse into + // child field of sv corresponding to field i of + // dv, with a new set of matchedKeys and updating + // the parentPath to indicate that we are one level + // deeper. + ctx.recordMatchedKey(fieldInfo.name) value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) if value.IsValid() { - if err := fromUnstructured(value, fv); err != nil { + ctx.isInlined = false + ctx.pushKey(fieldInfo.name) + if err := fromUnstructured(value, fv, ctx); err != nil { return err } + ctx.parentPath = ctx.parentPath[:pathLen] + ctx.isInlined = svInlined } else { fv.Set(reflect.Zero(fv.Type())) } } } + if !svInlined { + ctx.popAndVerifyMatchedKeys(sv) + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index be0c5edc8..7dfa45762 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -19,6 +19,7 @@ package runtime import ( "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -124,20 +125,30 @@ func IsMissingVersion(err error) bool { // strictDecodingError is a base error type that is returned by a strict Decoder such // as UniversalStrictDecoder. type strictDecodingError struct { - message string - data string + errors []error } // NewStrictDecodingError creates a new strictDecodingError object. 
-func NewStrictDecodingError(message string, data string) error { +func NewStrictDecodingError(errors []error) error { return &strictDecodingError{ - message: message, - data: data, + errors: errors, } } func (e *strictDecodingError) Error() string { - return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) + var s strings.Builder + s.WriteString("strict decoding error: ") + for i, err := range e.errors { + if i != 0 { + s.WriteString(", ") + } + s.WriteString(err.Error()) + } + return s.String() +} + +func (e *strictDecodingError) Errors() []error { + return e.errors } // IsStrictDecodingError returns true if the error indicates that the provided object @@ -149,3 +160,13 @@ func IsStrictDecodingError(err error) bool { _, ok := err.(*strictDecodingError) return ok } + +// AsStrictDecodingError returns a strict decoding error +// containing all the strictness violations. +func AsStrictDecodingError(err error) (*strictDecodingError, bool) { + if err == nil { + return nil, false + } + strictErr, ok := err.(*strictDecodingError) + return strictErr, ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 3e1fab1d1..b324b76c6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -125,6 +125,9 @@ type SerializerInfo struct { // PrettySerializer, if set, can serialize this object in a form biased towards // readability. PrettySerializer Serializer + // StrictSerializer, if set, deserializes this object strictly, + // erring on unknown fields. + StrictSerializer Serializer // StreamSerializer, if set, describes the streaming serialization format // for this media type. StreamSerializer *StreamSerializerInfo diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index ae47ab3ab..f5da6b12f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -99,7 +99,7 @@ func NewScheme() *Scheme { versionPriority: map[string][]string{}, schemeName: naming.GetNameFromCallsite(internalPackages...), } - s.converter = conversion.NewConverter(s.nameFunc) + s.converter = conversion.NewConverter(nil) // Enable couple default conversions by default. utilruntime.Must(RegisterEmbeddedConversions(s)) @@ -107,28 +107,6 @@ func NewScheme() *Scheme { return s } -// nameFunc returns the name of the type that we wish to use to determine when two types attempt -// a conversion. Defaults to the go name of the type if the type is not registered. 
-func (s *Scheme) nameFunc(t reflect.Type) string { - // find the preferred names for this type - gvks, ok := s.typeToGVK[t] - if !ok { - return t.Name() - } - - for _, gvk := range gvks { - internalGV := gvk.GroupVersion() - internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in - internalGVK := internalGV.WithKind(gvk.Kind) - - if internalType, exists := s.gvkToType[internalGVK]; exists { - return s.typeToGVK[internalType][0].Kind - } - } - - return gvks[0].Kind -} - // Converter allows access to the converter for the scheme func (s *Scheme) Converter() *conversion.Converter { return s.converter diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index e55ab94d1..9de35e791 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -40,6 +40,7 @@ type serializerType struct { Serializer runtime.Serializer PrettySerializer runtime.Serializer + StrictSerializer runtime.Serializer AcceptStreamContentTypes []string StreamContentType string @@ -70,10 +71,20 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option ) } + strictJSONSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + ) + jsonSerializerType.StrictSerializer = strictJSONSerializer + yamlSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, ) + strictYAMLSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: true}, + ) protoSerializer := protobuf.NewSerializer(scheme, scheme) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) @@ -85,12 +96,16 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, + StrictSerializer: strictYAMLSerializer, }, { AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, ContentType: runtime.ContentTypeProtobuf, FileExtensions: []string{"pb"}, Serializer: protoSerializer, + // note, strict decoding is unsupported for protobuf, + // fall back to regular serializing + StrictSerializer: protoSerializer, Framer: protobuf.LengthDelimitedFramer, StreamSerializer: protoRawSerializer, @@ -187,6 +202,7 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec EncodesAsText: d.EncodesAsText, Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, + StrictSerializer: d.StrictSerializer, } mediaType, _, err := mime.ParseMediaType(info.MediaType) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 48f0777b2..6c082f660 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -20,10 +20,8 @@ import ( "encoding/json" "io" "strconv" - "unsafe" - jsoniter "github.com/json-iterator/go" - "github.com/modern-go/reflect2" + kjson "sigs.k8s.io/json" "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" @@ -68,6 +66,7 @@ func identifier(options SerializerOptions) runtime.Identifier { "name": "json", "yaml": strconv.FormatBool(options.Yaml), "pretty": strconv.FormatBool(options.Pretty), + "strict": 
strconv.FormatBool(options.Strict), } identifier, err := json.Marshal(result) if err != nil { @@ -110,79 +109,6 @@ type Serializer struct { var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} -type customNumberExtension struct { - jsoniter.DummyExtension -} - -func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { - if typ.String() == "interface {}" { - return customNumberDecoder{} - } - return nil -} - -type customNumberDecoder struct { -} - -func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - switch iter.WhatIsNext() { - case jsoniter.NumberValue: - var number jsoniter.Number - iter.ReadVal(&number) - i64, err := strconv.ParseInt(string(number), 10, 64) - if err == nil { - *(*interface{})(ptr) = i64 - return - } - f64, err := strconv.ParseFloat(string(number), 64) - if err == nil { - *(*interface{})(ptr) = f64 - return - } - iter.ReportError("DecodeNumber", err.Error()) - default: - *(*interface{})(ptr) = iter.Read() - } -} - -// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be -// case-sensitive when unmarshalling, and otherwise compatible with -// the encoding/json standard library. -func CaseSensitiveJSONIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be -// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with -// the encoding/json standard library. -func StrictCaseSensitiveJSONIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - DisallowUnknownFields: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// Private copies of jsoniter to try to shield against possible mutations -// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them -// in some other library will mess with every usage of the jsoniter library in the whole program. 
-// See https://github.com/json-iterator/go/issues/265 -var caseSensitiveJSONIterator = CaseSensitiveJSONIterator() -var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator() - // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { if len(actual.Kind) == 0 { @@ -237,8 +163,11 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil { + strictErrs, err := s.unmarshal(into, data, originalData) + if err != nil { return nil, actual, err + } else if len(strictErrs) > 0 { + return into, actual, runtime.NewStrictDecodingError(strictErrs) } return into, actual, nil case err != nil: @@ -261,35 +190,12 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil { - return nil, actual, err - } - - // If the deserializer is non-strict, return successfully here. - if !s.options.Strict { - return obj, actual, nil - } - - // In strict mode pass the data trough the YAMLToJSONStrict converter. - // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, - // the output would equal the input, unless there is a parsing error such as duplicate fields. - // As we know this was successful in the non-strict case, the only error that may be returned here - // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError - // the actual error is that the object contains duplicate fields. - altered, err := yaml.YAMLToJSONStrict(originalData) + strictErrs, err := s.unmarshal(obj, data, originalData) if err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + return nil, actual, err + } else if len(strictErrs) > 0 { + return obj, actual, runtime.NewStrictDecodingError(strictErrs) } - // As performance is not an issue for now for the strict deserializer (one has regardless to do - // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated - // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is - // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, - // the actual error is that the object contains unknown field. - strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // Always return the same object as the non-strict serializer to avoid any deviations. 
return obj, actual, nil } @@ -303,7 +209,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { if s.options.Yaml { - json, err := caseSensitiveJSONIterator.Marshal(obj) + json, err := json.Marshal(obj) if err != nil { return err } @@ -316,7 +222,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } if s.options.Pretty { - data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ") + data, err := json.MarshalIndent(obj, "", " ") if err != nil { return err } @@ -327,6 +233,50 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { return encoder.Encode(obj) } +// IsStrict indicates whether the serializer +// uses strict decoding or not +func (s *Serializer) IsStrict() bool { + return s.options.Strict +} + +func (s *Serializer) unmarshal(into runtime.Object, data, originalData []byte) (strictErrs []error, err error) { + // If the deserializer is non-strict, return here. + if !s.options.Strict { + if err := kjson.UnmarshalCaseSensitivePreserveInts(data, into); err != nil { + return nil, err + } + return nil, nil + } + + if s.options.Yaml { + // In strict mode pass the original data through the YAMLToJSONStrict converter. + // This is done to catch duplicate fields in YAML that would have been dropped in the original YAMLToJSON conversion. + // TODO: rework YAMLToJSONStrict to return warnings about duplicate fields without terminating so we don't have to do this twice. + _, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + strictErrs = append(strictErrs, err) + } + } + + var strictJSONErrs []error + if u, isUnstructured := into.(runtime.Unstructured); isUnstructured { + // Unstructured is a custom unmarshaler that gets delegated + // to, so inorder to detect strict JSON errors we need + // to unmarshal directly into the object. + m := u.UnstructuredContent() + strictJSONErrs, err = kjson.UnmarshalStrict(data, &m) + u.SetUnstructuredContent(m) + } else { + strictJSONErrs, err = kjson.UnmarshalStrict(data, into) + } + if err != nil { + // fatal decoding error, not due to strictness + return nil, err + } + strictErrs = append(strictErrs, strictJSONErrs...) + return strictErrs, nil +} + // Identifier implements runtime.Encoder interface. func (s *Serializer) Identifier() runtime.Identifier { return s.identifier diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go index 709f85291..5a6c200dd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -109,10 +109,16 @@ func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime for _, r := range skipped { out, actual, err := r.Decode(data, gvk, into) if err != nil { - lastErr = err - continue + // if we got an object back from the decoder, and the + // error was a strict decoding error (e.g. 
unknown or + // duplicate fields), we still consider the recognizer + // to have understood the object + if out == nil || !runtime.IsStrictDecodingError(err) { + lastErr = err + continue + } } - return out, actual, nil + return out, actual, err } if lastErr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index a60a7c041..971c46d49 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -90,7 +90,6 @@ func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) } // must read the rest of the frame (until we stop getting ErrShortBuffer) d.resetRead = true - base = 0 return nil, nil, ErrObjectTooLarge } if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 718c5dfb7..ea7c580bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -133,11 +133,18 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } + var strictDecodingErr error obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { - return nil, gvk, err + if obj != nil && runtime.IsStrictDecodingError(err) { + // save the strictDecodingError and the caller decide what to do with it + strictDecodingErr = err + } else { + return nil, gvk, err + } } + // TODO: look into strict handling of nested object decoding if d, ok := obj.(runtime.NestedObjectDecoder); ok { if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { return nil, gvk, err @@ -153,14 +160,14 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Short-circuit conversion if the into object is same object if into == obj { - return into, gvk, nil + return into, gvk, strictDecodingErr } if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } - return into, gvk, nil + return into, gvk, strictDecodingErr } // perform defaulting if requested @@ -172,7 +179,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if err != nil { return nil, gvk, err } - return out, gvk, nil + return out, gvk, strictDecodingErr } // Encode ensures the provided object is output in the appropriate group and version, invoking diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index b0393839e..069ea4f92 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index faf2c2645..0d2f153bf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -21,17 +21,17 @@ import ( "sync" "time" - utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) // NewExpiring returns an initialized expiring cache. 
func NewExpiring() *Expiring { - return NewExpiringWithClock(utilclock.RealClock{}) + return NewExpiringWithClock(clock.RealClock{}) } // NewExpiringWithClock is like NewExpiring but allows passing in a custom // clock for testing. -func NewExpiringWithClock(clock utilclock.Clock) *Expiring { +func NewExpiringWithClock(clock clock.Clock) *Expiring { return &Expiring{ clock: clock, cache: make(map[interface{}]entry), @@ -40,7 +40,7 @@ func NewExpiringWithClock(clock utilclock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { - clock utilclock.Clock + clock clock.Clock // mu protects the below fields mu sync.RWMutex diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go deleted file mode 100644 index 1a544d3b2..000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ /dev/null @@ -1,445 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clock - -import ( - "sync" - "time" -) - -// PassiveClock allows for injecting fake or real clocks into code -// that needs to read the current time but does not support scheduling -// activity in the future. -type PassiveClock interface { - Now() time.Time - Since(time.Time) time.Duration -} - -// Clock allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type Clock interface { - PassiveClock - After(time.Duration) <-chan time.Time - AfterFunc(time.Duration, func()) Timer - NewTimer(time.Duration) Timer - Sleep(time.Duration) - NewTicker(time.Duration) Ticker -} - -// RealClock really calls time.Now() -type RealClock struct{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Since returns time since the specified timestamp. -func (RealClock) Since(ts time.Time) time.Duration { - return time.Since(ts) -} - -// After is the same as time.After(d). -func (RealClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -// AfterFunc is the same as time.AfterFunc(d, f). -func (RealClock) AfterFunc(d time.Duration, f func()) Timer { - return &realTimer{ - timer: time.AfterFunc(d, f), - } -} - -// NewTimer returns a new Timer. -func (RealClock) NewTimer(d time.Duration) Timer { - return &realTimer{ - timer: time.NewTimer(d), - } -} - -// NewTicker returns a new Ticker. -func (RealClock) NewTicker(d time.Duration) Ticker { - return &realTicker{ - ticker: time.NewTicker(d), - } -} - -// Sleep pauses the RealClock for duration d. -func (RealClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -// FakePassiveClock implements PassiveClock, but returns an arbitrary time. -type FakePassiveClock struct { - lock sync.RWMutex - time time.Time -} - -// FakeClock implements Clock, but returns an arbitrary time. 
-type FakeClock struct { - FakePassiveClock - - // waiters are waiting for the fake time to pass their specified time - waiters []fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan time.Time - afterFunc func() -} - -// NewFakePassiveClock returns a new FakePassiveClock. -func NewFakePassiveClock(t time.Time) *FakePassiveClock { - return &FakePassiveClock{ - time: t, - } -} - -// NewFakeClock returns a new FakeClock -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - FakePassiveClock: *NewFakePassiveClock(t), - } -} - -// Now returns f's time. -func (f *FakePassiveClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakePassiveClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// SetTime sets the time on the FakePassiveClock. -func (f *FakePassiveClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.time = t -} - -// After is the Fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -// AfterFunc is the Fake version of time.AfterFunc(d, callback). -func (f *FakeClock) AfterFunc(d time.Duration, cb func()) Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - afterFunc: cb, - }, - } - f.waiters = append(f.waiters, timer.waiter) - return timer -} - -// NewTimer is the Fake version of time.NewTimer(d). -func (f *FakeClock) NewTimer(d time.Duration) Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }, - } - f.waiters = append(f.waiters, timer.waiter) - return timer -} - -// NewTicker returns a new Ticker. -func (f *FakeClock) NewTicker(d time.Duration) Ticker { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return &fakeTicker{ - c: ch, - } -} - -// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// SetTime sets the time on a FakeClock. -func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. 
-func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := &f.waiters[i] - if !w.targetTime.After(t) { - - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.afterFunc != nil { - w.afterFunc() - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, *w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied -// (so you can write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -// Sleep pauses the FakeClock for duration d. -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. -func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// After is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// AfterFunc is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) AfterFunc(d time.Duration, cb func()) Timer { - panic("IntervalClock doesn't implement AfterFunc") -} - -// NewTimer is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTimer(d time.Duration) Timer { - panic("IntervalClock doesn't implement NewTimer") -} - -// NewTicker is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTicker(d time.Duration) Ticker { - panic("IntervalClock doesn't implement NewTicker") -} - -// Sleep is currently unimplemented; will panic. -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} - -// Timer allows for injecting fake or real timers into code that -// needs to do arbitrary things based on time. -type Timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// realTimer is backed by an actual time.Timer. -type realTimer struct { - timer *time.Timer -} - -// C returns the underlying timer's channel. -func (r *realTimer) C() <-chan time.Time { - return r.timer.C -} - -// Stop calls Stop() on the underlying timer. -func (r *realTimer) Stop() bool { - return r.timer.Stop() -} - -// Reset calls Reset() on the underlying timer. -func (r *realTimer) Reset(d time.Duration) bool { - return r.timer.Reset(d) -} - -// fakeTimer implements Timer based on a FakeClock. -type fakeTimer struct { - fakeClock *FakeClock - waiter fakeClockWaiter -} - -// C returns the channel that notifies when this timer has fired. -func (f *fakeTimer) C() <-chan time.Time { - return f.waiter.destChan -} - -// Stop conditionally stops the timer. 
If the timer has neither fired -// nor been stopped then this call stops the timer and returns true, -// otherwise this call returns false. This is like time.Timer::Stop. -func (f *fakeTimer) Stop() bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - // The timer has already fired or been stopped, unless it is found - // among the clock's waiters. - stopped := false - oldWaiters := f.fakeClock.waiters - newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) - seekChan := f.waiter.destChan - for i := range oldWaiters { - // Identify the timer's fakeClockWaiter by the identity of the - // destination channel, nothing else is necessarily unique and - // constant since the timer's creation. - if oldWaiters[i].destChan == seekChan { - stopped = true - } else { - newWaiters = append(newWaiters, oldWaiters[i]) - } - } - - f.fakeClock.waiters = newWaiters - - return stopped -} - -// Reset conditionally updates the firing time of the timer. If the -// timer has neither fired nor been stopped then this call resets the -// timer to the fake clock's "now" + d and returns true, otherwise -// it creates a new waiter, adds it to the clock, and returns true. -// -// It is not possible to return false, because a fake timer can be reset -// from any state (waiting to fire, already fired, and stopped). -// -// See the GoDoc for time.Timer::Reset for more context on why -// the return value of Reset() is not useful. -func (f *fakeTimer) Reset(d time.Duration) bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - waiters := f.fakeClock.waiters - seekChan := f.waiter.destChan - for i := range waiters { - if waiters[i].destChan == seekChan { - waiters[i].targetTime = f.fakeClock.time.Add(d) - return true - } - } - // No existing waiter, timer has already fired or been reset. - // We should still enable Reset() to succeed by creating a - // new waiter and adding it to the clock's waiters. - newWaiter := fakeClockWaiter{ - targetTime: f.fakeClock.time.Add(d), - destChan: seekChan, - } - f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter) - return true -} - -// Ticker defines the Ticker interface -type Ticker interface { - C() <-chan time.Time - Stop() -} - -type realTicker struct { - ticker *time.Ticker -} - -func (t *realTicker) C() <-chan time.Time { - return t.ticker.C -} - -func (t *realTicker) Stop() { - t.ticker.Stop() -} - -type fakeTicker struct { - c <-chan time.Time -} - -func (t *fakeTicker) C() <-chan time.Time { - return t.c -} - -func (t *fakeTicker) Stop() { -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 45aa74bf5..10df0d99c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -132,14 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // Return whatever remaining data exists from an in progress frame if n := len(r.remaining); n > 0 { if n <= len(data) { - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining...) r.remaining = nil return n, nil } n = len(data) - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], r.remaining[:n]...) 
r.remaining = r.remaining[n:] return n, io.ErrShortBuffer @@ -157,7 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > n { - //lint:ignore SA4006,SA4010 underlying array of data is modified here. + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. data = append(data[0:0], m[:n]...) r.remaining = m[n:] return n, io.ErrShortBuffer diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go index 2501d5516..a502b5adb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -1,3 +1,4 @@ +//go:build !notest // +build !notest /* diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 778e58f70..55dba361c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -17,10 +17,11 @@ limitations under the License. package json import ( - "bytes" "encoding/json" "fmt" "io" + + kjson "sigs.k8s.io/json" ) // NewEncoder delegates to json.NewEncoder @@ -38,50 +39,11 @@ func Marshal(v interface{}) ([]byte, error) { // limit recursive depth to prevent stack overflow errors const maxDepth = 10000 -// Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers -// are converted to int64 or float64 +// Unmarshal unmarshals the given data. +// Object keys are case-sensitive. +// Numbers decoded into interface{} fields are converted to int64 or float64. func Unmarshal(data []byte, v interface{}) error { - switch v := v.(type) { - case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return ConvertMapNumbers(*v, 0) - - case *[]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return ConvertSliceNumbers(*v, 0) - - case *interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return ConvertInterfaceNumbers(v, 0) - - default: - return json.Unmarshal(data, v) - } + return kjson.UnmarshalCaseSensitivePreserveInts(data, v) } // ConvertInterfaceNumbers converts any json.Number values to int64 or float64. 
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go index 590c16867..792badbc3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go @@ -75,6 +75,13 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan if !ok { return fmt.Errorf("unable to convert managed fields for %s to unstructured, expected map, got %T", fieldManager, u) } + + // set the type meta manually if it doesn't exist to avoid missing kind errors + // when decoding from unstructured JSON + if _, ok := m["kind"]; !ok && object.GetObjectKind().GroupVersionKind().Kind != "" { + m["kind"] = object.GetObjectKind().GroupVersionKind().Kind + m["apiVersion"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String() + } if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil { return fmt.Errorf("error extracting into obj from unstructured: %w", err) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go similarity index 99% rename from vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go rename to vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go index f8abcaf04..0cc228af9 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/gvkparser.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package internal +package managedfields import ( "fmt" diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index d75ac6efa..04432b175 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "k8s.io/klog/v2" + netutils "k8s.io/utils/net" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -237,6 +238,29 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) { } } +// CloseIdleConnectionsFor close idles connections for the Transport. +// If the Transport is wrapped it iterates over the wrapped round trippers +// until it finds one that implements the CloseIdleConnections method. +// If the Transport does not have a CloseIdleConnections method +// then this function does nothing. +func CloseIdleConnectionsFor(transport http.RoundTripper) { + if transport == nil { + return + } + type closeIdler interface { + CloseIdleConnections() + } + + switch transport := transport.(type) { + case closeIdler: + transport.CloseIdleConnections() + case RoundTripperWrapper: + CloseIdleConnectionsFor(transport.WrappedRoundTripper()) + default: + klog.Warningf("unknown transport type: %T", transport) + } +} + type TLSClientConfigHolder interface { TLSClientConfig() *tls.Config } @@ -289,7 +313,7 @@ func SourceIPs(req *http.Request) []net.IP { // Use the first valid one. parts := strings.Split(hdrForwardedFor, ",") for _, part := range parts { - ip := net.ParseIP(strings.TrimSpace(part)) + ip := netutils.ParseIPSloppy(strings.TrimSpace(part)) if ip != nil { srcIPs = append(srcIPs, ip) } @@ -299,7 +323,7 @@ func SourceIPs(req *http.Request) []net.IP { // Try the X-Real-Ip header. 
hdrRealIp := hdr.Get("X-Real-Ip") if hdrRealIp != "" { - ip := net.ParseIP(hdrRealIp) + ip := netutils.ParseIPSloppy(hdrRealIp) // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. if ip != nil && !containsIP(srcIPs, ip) { srcIPs = append(srcIPs, ip) @@ -311,11 +335,11 @@ func SourceIPs(req *http.Request) []net.IP { // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. host, _, err := net.SplitHostPort(req.RemoteAddr) if err == nil { - remoteIP = net.ParseIP(host) + remoteIP = netutils.ParseIPSloppy(host) } // Fallback if Remote Address was just IP. if remoteIP == nil { - remoteIP = net.ParseIP(req.RemoteAddr) + remoteIP = netutils.ParseIPSloppy(req.RemoteAddr) } // Don't duplicate remote IP if it's already the last address in the chain. @@ -382,7 +406,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error cidrs := []*net.IPNet{} for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) + _, cidr, _ := netutils.ParseCIDRSloppy(noProxyRule) if cidr != nil { cidrs = append(cidrs, cidr) } @@ -393,7 +417,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - ip := net.ParseIP(req.URL.Hostname()) + ip := netutils.ParseIPSloppy(req.URL.Hostname()) if ip == nil { return delegate(req) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 9adf4cfe4..822416806 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -27,6 +27,7 @@ import ( "strings" "k8s.io/klog/v2" + netutils "k8s.io/utils/net" ) type AddressFamily uint @@ -221,7 +222,7 @@ func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) if len(addrs) > 0 { for i := range addrs { klog.V(4).Infof("Checking addr %s.", addrs[i].String()) - ip, _, err := net.ParseCIDR(addrs[i].String()) + ip, _, err := netutils.ParseCIDRSloppy(addrs[i].String()) if err != nil { return nil, err } @@ -336,7 +337,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam continue } for _, addr := range addrs { - ip, _, err := net.ParseCIDR(addr.String()) + ip, _, err := netutils.ParseCIDRSloppy(addr.String()) if err != nil { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index c31de15e7..d49a56536 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -31,22 +31,22 @@ type PatchMeta struct { patchMergeKey string } -func (pm PatchMeta) GetPatchStrategies() []string { +func (pm *PatchMeta) GetPatchStrategies() []string { if pm.patchStrategies == nil { return []string{} } return pm.patchStrategies } -func (pm PatchMeta) SetPatchStrategies(ps []string) { +func (pm *PatchMeta) SetPatchStrategies(ps []string) { pm.patchStrategies = ps } -func (pm PatchMeta) GetPatchMergeKey() string { +func (pm *PatchMeta) GetPatchMergeKey() string { return pm.patchMergeKey } -func (pm PatchMeta) SetPatchMergeKey(pmk string) { +func (pm *PatchMeta) SetPatchMergeKey(pmk string) { pm.patchMergeKey = pmk } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go 
b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index fd2081a28..1aedaa156 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1328,15 +1328,19 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me _, ok := original[k] if !ok { - // If it's not in the original document, just take the patch value. - original[k] = patchV + if !isDeleteList { + // If it's not in the original document, just take the patch value. + original[k] = patchV + } continue } originalType := reflect.TypeOf(original[k]) patchType := reflect.TypeOf(patchV) if originalType != patchType { - original[k] = patchV + if !isDeleteList { + original[k] = patchV + } continue } // If they're both maps or lists, recurse into the value. diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index c283ad189..2ed368f56 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -239,6 +239,9 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { // ToAggregate converts the ErrorList into an errors.Aggregate. func (list ErrorList) ToAggregate() utilerrors.Aggregate { + if len(list) == 0 { + return nil + } errs := make([]error, 0, len(list)) errorMsgs := sets.NewString() for _, err := range list { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index c8b419984..83df4fb8d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -25,6 +25,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" + netutils "k8s.io/utils/net" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -346,7 +347,7 @@ func IsValidPortName(port string) []string { // IsValidIP tests that the argument is a valid IP address. func IsValidIP(value string) []string { - if net.ParseIP(value) == nil { + if netutils.ParseIPSloppy(value) == nil { return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} } return nil @@ -355,7 +356,7 @@ func IsValidIP(value string) []string { // IsValidIPv4Address tests that the argument is a valid IPv4 address. func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { var allErrors field.ErrorList - ip := net.ParseIP(value) + ip := netutils.ParseIPSloppy(value) if ip == nil || ip.To4() == nil { allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) } @@ -365,7 +366,7 @@ func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { // IsValidIPv6Address tests that the argument is a valid IPv6 address. 
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { var allErrors field.ErrorList - ip := net.ParseIP(value) + ip := netutils.ParseIPSloppy(value) if ip == nil || ip.To4() != nil { allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index afb24876a..ec5be90b8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -24,8 +24,8 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" ) // For any test of the style: @@ -166,6 +166,9 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan // of every loop to prevent extra executions of f(). select { case <-stopCh: + if !t.Stop() { + <-t.C() + } return case <-t.C(): } diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 56f4262ac..9837b3df2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -59,6 +59,34 @@ func Unmarshal(data []byte, v interface{}) error { } } +// UnmarshalStrict unmarshals the given data +// strictly (erroring when there are duplicate fields). +func UnmarshalStrict(data []byte, v interface{}) error { + preserveIntFloat := func(d *json.Decoder) *json.Decoder { + d.UseNumber() + return d + } + switch v := v.(type) { + case *map[string]interface{}: + if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertMapNumbers(*v, 0) + case *[]interface{}: + if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertSliceNumbers(*v, 0) + case *interface{}: + if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil { + return err + } + return jsonutil.ConvertInterfaceNumbers(v, 0) + default: + return yaml.UnmarshalStrict(data, v) + } +} + // ToJSON converts a single YAML document into a JSON document // or returns an error. If the document appears to be JSON the // YAML decoding path is not used (so that error messages are diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 71ef4da33..dd27d4526 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/apiserver/pkg/admission/audit.go b/vendor/k8s.io/apiserver/pkg/admission/audit.go index d1e103cfc..02694b0a9 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/audit.go +++ b/vendor/k8s.io/apiserver/pkg/admission/audit.go @@ -19,6 +19,7 @@ package admission import ( "context" "fmt" + "sync" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" @@ -27,7 +28,10 @@ import ( // auditHandler logs annotations set by other admission handlers type auditHandler struct { Interface - ae *auditinternal.Event + // TODO: move the lock near the Annotations field of the audit event so it is always protected from concurrent access. 
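The auditHandler hunk below adds a sync.Mutex so concurrent admission plugins cannot race while writing audit annotations. A minimal sketch of that locking pattern, with illustrative names (annotationLogger, Log) rather than the apiserver's actual types:

```go
package main

import (
	"fmt"
	"sync"
)

type annotationLogger struct {
	mu          sync.Mutex // serializes writes to the shared map
	annotations map[string]string
}

func (l *annotationLogger) Log(key, value string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.annotations == nil {
		l.annotations = map[string]string{}
	}
	l.annotations[key] = value
}

func main() {
	l := &annotationLogger{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			l.Log(fmt.Sprintf("webhook-%d", i), "admitted")
		}(i)
	}
	wg.Wait()
	fmt.Println(len(l.annotations)) // 4
}
```

This is also why the hunk switches the handler's methods from value to pointer receivers: a value receiver would copy the mutex and defeat the locking.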
+ // to protect the 'Annotations' map of the audit event from concurrent writes + mutex sync.Mutex + ae *auditinternal.Event } var _ Interface = &auditHandler{} @@ -42,10 +46,10 @@ func WithAudit(i Interface, ae *auditinternal.Event) Interface { if i == nil { return i } - return &auditHandler{i, ae} + return &auditHandler{Interface: i, ae: ae} } -func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { +func (handler *auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { if !handler.Interface.Handles(a.GetOperation()) { return nil } @@ -60,7 +64,7 @@ func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInt return err } -func (handler auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { +func (handler *auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { if !handler.Interface.Handles(a.GetOperation()) { return nil } @@ -84,10 +88,13 @@ func ensureAnnotationGetter(a Attributes) error { return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter") } -func (handler auditHandler) logAnnotations(a Attributes) { +func (handler *auditHandler) logAnnotations(a Attributes) { if handler.ae == nil { return } + handler.mutex.Lock() + defer handler.mutex.Unlock() + switch a := a.(type) { case privateAnnotationsGetter: for key, value := range a.getAnnotations(handler.ae.Level) { diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go index 82752fe08..757e26882 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -18,7 +18,6 @@ package metrics import ( "context" - "fmt" "strconv" "time" @@ -45,8 +44,6 @@ const ( ) var ( - // Use buckets ranging from 5 ms to 2.5 seconds (admission webhooks timeout at 30 seconds by default). - latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5} latencySummaryMaxAge = 5 * time.Hour // Metrics provides access to all admission metrics. @@ -119,25 +116,75 @@ type AdmissionMetrics struct { controller *metricSet webhook *metricSet webhookRejection *metrics.CounterVec + webhookRequest *metrics.CounterVec } // newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names. func newAdmissionMetrics() *AdmissionMetrics { // Admission metrics for a step of the admission flow. The entire admission flow is broken down into a series of steps // Each step is identified by a distinct type label value. - step := newMetricSet("step", - []string{"type", "operation", "rejected"}, - "Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true) + // Use buckets ranging from 5 ms to 2.5 seconds. 
+ step := &metricSet{ + latencies: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "step_admission_duration_seconds", + Help: "Admission sub-step latency histogram in seconds, broken out for each operation and API resource and step type (validate or admit).", + Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5}, + StabilityLevel: metrics.STABLE, + }, + []string{"type", "operation", "rejected"}, + ), + + latenciesSummary: metrics.NewSummaryVec( + &metrics.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "step_admission_duration_seconds_summary", + Help: "Admission sub-step latency summary in seconds, broken out for each operation and API resource and step type (validate or admit).", + MaxAge: latencySummaryMaxAge, + StabilityLevel: metrics.ALPHA, + }, + []string{"type", "operation", "rejected"}, + ), + } // Built-in admission controller metrics. Each admission controller is identified by name. - controller := newMetricSet("controller", - []string{"name", "type", "operation", "rejected"}, - "Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + // Use buckets ranging from 5 ms to 2.5 seconds. + controller := &metricSet{ + latencies: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "controller_admission_duration_seconds", + Help: "Admission controller latency histogram in seconds, identified by name and broken out for each operation and API resource and type (validate or admit).", + Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5}, + StabilityLevel: metrics.STABLE, + }, + []string{"name", "type", "operation", "rejected"}, + ), + + latenciesSummary: nil, + } // Admission webhook metrics. Each webhook is identified by name. - webhook := newMetricSet("webhook", - []string{"name", "type", "operation", "rejected"}, - "Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + // Use buckets ranging from 5 ms to 2.5 seconds (admission webhooks timeout at 30 seconds by default). + webhook := &metricSet{ + latencies: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_admission_duration_seconds", + Help: "Admission webhook latency histogram in seconds, identified by name and broken out for each operation and API resource and type (validate or admit).", + Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5}, + StabilityLevel: metrics.STABLE, + }, + []string{"name", "type", "operation", "rejected"}, + ), + + latenciesSummary: nil, + } webhookRejection := metrics.NewCounterVec( &metrics.CounterOpts{ @@ -149,11 +196,22 @@ func newAdmissionMetrics() *AdmissionMetrics { }, []string{"name", "type", "operation", "error_type", "rejection_code"}) + webhookRequest := metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_request_total", + Help: "Admission webhook request total, identified by name and broken out for each admission type (validating or mutating) and operation. Additional labels specify whether the request was rejected or not and an HTTP status code. 
Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", + StabilityLevel: metrics.ALPHA, + }, + []string{"name", "type", "operation", "code", "rejected"}) + step.mustRegister() controller.mustRegister() webhook.mustRegister() legacyregistry.MustRegister(webhookRejection) - return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection} + legacyregistry.MustRegister(webhookRequest) + return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection, webhookRequest: webhookRequest} } func (m *AdmissionMetrics) reset() { @@ -173,8 +231,13 @@ func (m *AdmissionMetrics) ObserveAdmissionController(ctx context.Context, elaps } // ObserveWebhook records admission related metrics for a admission webhook. -func (m *AdmissionMetrics) ObserveWebhook(ctx context.Context, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - m.webhook.observe(ctx, elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) +func (m *AdmissionMetrics) ObserveWebhook(ctx context.Context, name string, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, code int) { + // We truncate codes greater than 600 to keep the cardinality bounded. + if code > 600 { + code = 600 + } + m.webhookRequest.WithContext(ctx).WithLabelValues(name, stepType, string(attr.GetOperation()), strconv.Itoa(code), strconv.FormatBool(rejected)).Inc() + m.webhook.observe(ctx, elapsed, name, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected)) } // ObserveWebhookRejection records admission related metrics for an admission webhook rejection. @@ -192,39 +255,6 @@ type metricSet struct { latenciesSummary *metrics.SummaryVec } -func newMetricSet(name string, labels []string, helpTemplate string, hasSummary bool) *metricSet { - var summary *metrics.SummaryVec - if hasSummary { - summary = metrics.NewSummaryVec( - &metrics.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: fmt.Sprintf("%s_admission_duration_seconds_summary", name), - Help: fmt.Sprintf(helpTemplate, "latency summary in seconds"), - MaxAge: latencySummaryMaxAge, - StabilityLevel: metrics.ALPHA, - }, - labels, - ) - } - - return &metricSet{ - latencies: metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: fmt.Sprintf("%s_admission_duration_seconds", name), - Help: fmt.Sprintf(helpTemplate, "latency histogram in seconds"), - Buckets: latencyBuckets, - StabilityLevel: metrics.ALPHA, - }, - labels, - ), - - latenciesSummary: summary, - } -} - // MustRegister registers all the prometheus metrics in the metricSet. 
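A sketch of the label derivation the reworked ObserveWebhook performs for the new webhook_request_total counter, assuming the caller passes the raw HTTP status code from the webhook response (0 when no response was produced). The helper name is illustrative; the real code feeds these values straight into a component-base CounterVec.

```go
package main

import (
	"fmt"
	"strconv"
)

func webhookRequestLabels(name, stepType, operation string, code int, rejected bool) []string {
	// Codes above 600 are clamped so the "code" label stays low-cardinality
	// even when a webhook returns a nonsense status.
	if code > 600 {
		code = 600
	}
	return []string{name, stepType, operation, strconv.Itoa(code), strconv.FormatBool(rejected)}
}

func main() {
	fmt.Println(webhookRequestLabels("pod-policy.example.com", "admit", "CREATE", 728, true))
	// [pod-policy.example.com admit CREATE 600 true]
}
```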
func (m *metricSet) mustRegister() { legacyregistry.MustRegister(m.latencies) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index c417e3f98..936a95e45 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -29,13 +29,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilcache "k8s.io/apimachinery/pkg/util/cache" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/initializer" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/utils/clock" ) const ( diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go index 65eb414fc..4cf69291b 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go index 99fc6a6fa..839c1fc7a 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go index cce2e603a..dac177e93 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go index eadb147c4..66aaecbd8 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go index 
a59d62d6c..f997f4aba 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go index dd621a3ac..5070cb91b 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go index 90b7e0ae6..27c3ede0d 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 3b35bb31e..16642c183 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer/json" + utiljson "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/admission" admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" @@ -42,6 +43,7 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request" auditinternal "k8s.io/apiserver/pkg/apis/audit" + endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/apiserver/pkg/warning" utiltrace "k8s.io/utils/trace" @@ -61,8 +63,6 @@ const ( MutationAuditAnnotationFailedOpenKeyPrefix string = "failed-open." 
+ MutationAuditAnnotationPrefix ) -var encodingjson = json.CaseSensitiveJSONIterator() - type mutatingDispatcher struct { cm *webhookutil.ClientManager plugin *Plugin @@ -148,17 +148,21 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib case *webhookutil.ErrCallingWebhook: if !ignoreClientCallFailures { rejected = true - admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code)) } + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", int(err.Status.ErrStatus.Code)) case *webhookutil.ErrWebhookRejection: rejected = true admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", int(err.Status.ErrStatus.Code)) default: rejected = true admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", 0) } + } else { + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", 200) } - admissionmetrics.Metrics.ObserveWebhook(ctx, time.Since(t), rejected, versionedAttr.Attributes, "admit", hook.Name) if changed { // Patch had changed the object. Prepare to reinvoke all previous webhooks that are eligible for re-invocation. 
webhookReinvokeCtx.RequireReinvokingPreviouslyInvokedPlugins() @@ -213,7 +217,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss defer func() { annotator.addMutationAnnotation(changed) }() if attr.Attributes.IsDryRun() { if h.SideEffects == nil { - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil"), Status: apierrors.NewBadRequest("Webhook SideEffects is nil")} } if !(*h.SideEffects == admissionregistrationv1.SideEffectClassNone || *h.SideEffects == admissionregistrationv1.SideEffectClassNoneOnDryRun) { return false, webhookerrors.NewDryRunUnsupportedErr(h.Name) @@ -222,12 +226,12 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) if err != nil { - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not create admission objects: %w", err)} + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not create admission objects: %w", err), Status: apierrors.NewBadRequest("error creating admission objects")} } // Make the webhook request client, err := invocation.Webhook.GetRESTClient(a.cm) if err != nil { - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not get REST client: %w", err)} + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not get REST client: %w", err), Status: apierrors.NewBadRequest("error getting REST client")} } trace := utiltrace.New("Call mutating webhook", utiltrace.Field{"configuration", configurationName}, @@ -260,14 +264,26 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss } } - if err := r.Do(ctx).Into(response); err != nil { - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("failed to call webhook: %w", err)} + do := func() { err = r.Do(ctx).Into(response) } + if wd, ok := endpointsrequest.WebhookDurationFrom(ctx); ok { + tmp := do + do = func() { wd.AdmitTracker.Track(tmp) } + } + do() + if err != nil { + var status *apierrors.StatusError + if se, ok := err.(*apierrors.StatusError); ok { + status = se + } else { + status = apierrors.NewBadRequest("error calling webhook") + } + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("failed to call webhook: %w", err), Status: status} } trace.Step("Request completed") result, err := webhookrequest.VerifyAdmissionResponse(uid, true, response) if err != nil { - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("received invalid webhook response: %w", err)} + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("received invalid webhook response: %w", err), Status: apierrors.NewServiceUnavailable("error validating webhook response")} } for k, v := range result.AuditAnnotations { @@ -315,7 +331,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss return false, apierrors.NewInternalError(err) } default: - return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("unsupported patch type %q", result.PatchType)} + return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("unsupported 
patch type %q", result.PatchType), Status: webhookerrors.ToStatusErr(h.Name, result.Result)} } var newVersionedObject runtime.Object @@ -434,7 +450,7 @@ func mutationAnnotationValue(configuration, webhook string, mutated bool) (strin Webhook: webhook, Mutated: mutated, } - bytes, err := encodingjson.Marshal(m) + bytes, err := utiljson.Marshal(m) return string(bytes), err } @@ -445,6 +461,6 @@ func jsonPatchAnnotationValue(configuration, webhook string, patch interface{}) Patch: patch, PatchType: string(admissionv1.PatchTypeJSONPatch), } - bytes, err := encodingjson.Marshal(p) + bytes, err := utiljson.Marshal(p) return string(bytes), err } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index 250bb02a7..56099695a 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -32,6 +32,7 @@ import ( webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" webhookrequest "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/apiserver/pkg/warning" "k8s.io/klog/v2" @@ -119,18 +120,20 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr case *webhookutil.ErrCallingWebhook: if !ignoreClientCallFailures { rejected = true - admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, 0) + admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code)) } + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "validating", int(err.Status.ErrStatus.Code)) case *webhookutil.ErrWebhookRejection: rejected = true admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionNoError, int(err.Status.ErrStatus.Code)) + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "validating", int(err.Status.ErrStatus.Code)) default: rejected = true admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionAPIServerInternalError, 0) + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "validating", 0) } - } - admissionmetrics.Metrics.ObserveWebhook(ctx, time.Since(t), rejected, versionedAttr.Attributes, "validating", hook.Name) - if err == nil { + } else { + admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "validating", 200) return } @@ -181,7 +184,7 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWebhook, invocation *generic.WebhookInvocation, attr *generic.VersionedAttributes) error { if attr.Attributes.IsDryRun() { if h.SideEffects == nil { - return 
&webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil")} + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook SideEffects is nil"), Status: apierrors.NewBadRequest("Webhook SideEffects is nil")} } if !(*h.SideEffects == v1.SideEffectClassNone || *h.SideEffects == v1.SideEffectClassNoneOnDryRun) { return webhookerrors.NewDryRunUnsupportedErr(h.Name) @@ -190,12 +193,12 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWeb uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) if err != nil { - return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not create admission objects: %w", err), Status: apierrors.NewBadRequest("error creating admission objects")} } // Make the webhook request client, err := invocation.Webhook.GetRESTClient(d.cm) if err != nil { - return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not get REST client: %w", err), Status: apierrors.NewBadRequest("error getting REST client")} } trace := utiltrace.New("Call validating webhook", utiltrace.Field{"configuration", invocation.Webhook.GetConfigurationName()}, @@ -228,14 +231,26 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWeb } } - if err := r.Do(ctx).Into(response); err != nil { - return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + do := func() { err = r.Do(ctx).Into(response) } + if wd, ok := endpointsrequest.WebhookDurationFrom(ctx); ok { + tmp := do + do = func() { wd.ValidateTracker.Track(tmp) } + } + do() + if err != nil { + var status *apierrors.StatusError + if se, ok := err.(*apierrors.StatusError); ok { + status = se + } else { + status = apierrors.NewBadRequest("error calling webhook") + } + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("failed to call webhook: %w", err), Status: status} } trace.Step("Request completed") result, err := webhookrequest.VerifyAdmissionResponse(uid, false, response) if err != nil { - return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("received invalid webhook response: %w", err), Status: apierrors.NewServiceUnavailable("error validating webhook response")} } for k, v := range result.AuditAnnotations { diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index 6b30c0fc2..1d723d5e3 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -62,8 +62,7 @@ type EgressSelectorConfiguration struct { // EgressSelection provides the configuration for a single egress selection client. type EgressSelection struct { // Name is the name of the egress selection. 
- // Currently supported values are "controlplane", "master", "etcd" and "cluster" - // The "master" egress selector is deprecated in favor of "controlplane" + // Currently supported values are "controlplane", "etcd" and "cluster" Name string // Connection is the exact information used to configure the egress selection diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go index 5116139ae..22562c87a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go index a0e039de0..d1bc5e01f 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go index cce2e603a..dac177e93 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go new file mode 100644 index 000000000..31b8b7064 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + if err := autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil { + return err + } + if out.Name == "master" { + out.Name = "controlplane" + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 0a5f3d413..e60f2f3b7 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -65,11 +66,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope) }); err != nil { @@ -135,6 +131,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) + }); err != nil { + return err + } return nil } @@ -212,11 +213,6 @@ func autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *Egres return nil } -// Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection is an autogenerated conversion function. 
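A minimal sketch of the pattern the new conversion.go uses: wrap the autogenerated conversion and normalize the deprecated "master" egress selector name afterwards. The types and function names below are stand-ins, not the apiserver's own.

```go
package main

import "fmt"

type v1alphaSelection struct{ Name string }
type internalSelection struct{ Name string }

// stands in for the generated autoConvert_* function
func autoConvert(in *v1alphaSelection, out *internalSelection) error {
	out.Name = in.Name
	return nil
}

// stands in for the hand-written Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection
func convert(in *v1alphaSelection, out *internalSelection) error {
	if err := autoConvert(in, out); err != nil {
		return err
	}
	if out.Name == "master" { // deprecated spelling
		out.Name = "controlplane"
	}
	return nil
}

func main() {
	var out internalSelection
	_ = convert(&v1alphaSelection{Name: "master"}, &out)
	fmt.Println(out.Name) // controlplane
}
```

Registering the hand-written function with AddConversionFunc (instead of AddGeneratedConversionFunc) is what makes it take precedence, and it is why the generated code below converts EgressSelections element by element rather than via an unsafe slice cast.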
-func Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { - return autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, s) -} - func autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { out.Name = in.Name if err := Convert_apiserver_Connection_To_v1alpha1_Connection(&in.Connection, &out.Connection, s); err != nil { @@ -231,7 +227,17 @@ func Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver } func autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { - out.EgressSelections = *(*[]apiserver.EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]apiserver.EgressSelection, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.EgressSelections = nil + } return nil } @@ -241,7 +247,17 @@ func Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorCon } func autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { - out.EgressSelections = *(*[]EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + if err := Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.EgressSelections = nil + } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index 3f0fae3f8..0e95103e0 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go index dd621a3ac..5070cb91b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go new file mode 100644 index 000000000..d7110eff1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + if err := autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil { + return err + } + if out.Name == "master" { + out.Name = "controlplane" + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go index b0e44b7e1..37b0f2f7b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -45,11 +46,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope) }); err != nil { @@ -105,6 +101,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) + }); err != nil { + return err + } return nil } @@ -138,11 +139,6 @@ func autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *Egress return nil } -// Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection is an autogenerated conversion function. -func Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { - return autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in, out, s) -} - func autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error { out.Name = in.Name if err := Convert_apiserver_Connection_To_v1beta1_Connection(&in.Connection, &out.Connection, s); err != nil { @@ -157,7 +153,17 @@ func Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver. 
} func autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error { - out.EgressSelections = *(*[]apiserver.EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]apiserver.EgressSelection, len(*in)) + for i := range *in { + if err := Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.EgressSelections = nil + } return nil } @@ -167,7 +173,17 @@ func Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConf } func autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error { - out.EgressSelections = *(*[]EgressSelection)(unsafe.Pointer(&in.EgressSelections)) + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + if err := Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.EgressSelections = nil + } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go index eda0eaffc..bb1819cac 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go index 73e63fc11..198b5be4a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 55321b084..86acce65f 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go index e94a60093..596e02202 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -172,6 +172,15 @@ type Policy struct { // be specified per rule in which case the union of both are omitted. // +optional OmitStages []Stage + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. 
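The new OmitManagedFields fields use a plain bool on Policy and a *bool on PolicyRule so that an unset rule falls back to the policy-wide default. A short sketch of that resolution, under the assumption that consumers resolve it with a helper like the one below (the helper name is illustrative, not from the diff):

```go
package main

import "fmt"

func omitManagedFields(policyDefault bool, ruleOverride *bool) bool {
	if ruleOverride != nil {
		return *ruleOverride // the rule wins whenever it says anything at all
	}
	return policyDefault // otherwise fall back to Policy.OmitManagedFields
}

func main() {
	keep := false
	fmt.Println(omitManagedFields(true, nil))   // true: global default applies
	fmt.Println(omitManagedFields(true, &keep)) // false: rule overrides the default
}
```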
+ // +optional + OmitManagedFields bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -232,6 +241,17 @@ type PolicyRule struct { // An empty list means no restrictions will apply. // +optional OmitStages []Stage + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fileds should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + OmitManagedFields *bool } // GroupResources represents resource kinds in an API group. diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go index c569f5068..32b1e98e2 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go @@ -261,85 +261,88 @@ func init() { } var fileDescriptor_4982ac40a460d730 = []byte{ - // 1243 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcd, 0x8e, 0x1b, 0x45, - 0x10, 0xde, 0x59, 0xaf, 0xb3, 0x76, 0x3b, 0xeb, 0x75, 0x3a, 0x11, 0x19, 0xed, 0xc1, 0x36, 0x46, - 0x42, 0x06, 0x96, 0x99, 0xec, 0x12, 0x48, 0x14, 0x09, 0x24, 0x5b, 0x89, 0xc0, 0x22, 0xd9, 0xac, - 0xda, 0x38, 0x07, 0xc4, 0x21, 0xe3, 0x71, 0xc5, 0x1e, 0x6c, 0xcf, 0x4c, 0xba, 0x7b, 0x8c, 0xf6, - 0xc6, 0x0b, 0x20, 0x71, 0xe7, 0x2d, 0xb8, 0x45, 0xbc, 0x40, 0x8e, 0x39, 0xe6, 0x64, 0x11, 0xc3, - 0x43, 0xa0, 0x9c, 0x50, 0xff, 0xcc, 0x8f, 0xbd, 0x6b, 0xc5, 0xcb, 0x81, 0xdb, 0x74, 0xd5, 0xf7, - 0x7d, 0x55, 0x5d, 0xd3, 0x55, 0xdd, 0xe8, 0xdb, 0xf1, 0x5d, 0x66, 0x79, 0x81, 0x3d, 0x8e, 0xfa, - 0x40, 0x7d, 0xe0, 0xc0, 0xec, 0x19, 0xf8, 0x83, 0x80, 0xda, 0xda, 0xe1, 0x84, 0x1e, 0x03, 0x3a, - 0x03, 0x6a, 0x87, 0xe3, 0xa1, 0x5c, 0xd9, 0x4e, 0x34, 0xf0, 0xb8, 0x3d, 0x3b, 0xb2, 0x87, 0xe0, - 0x03, 0x75, 0x38, 0x0c, 0xac, 0x90, 0x06, 0x3c, 0xc0, 0x0d, 0xc5, 0xb1, 0x12, 0x8e, 0x15, 0x8e, - 0x87, 0x72, 0x65, 0x49, 0x8e, 0x35, 0x3b, 0x3a, 0xf8, 0x74, 0xe8, 0xf1, 0x51, 0xd4, 0xb7, 0xdc, - 0x60, 0x6a, 0x0f, 0x83, 0x61, 0x60, 0x4b, 0x6a, 0x3f, 0x7a, 0x26, 0x57, 0x72, 0x21, 0xbf, 0x94, - 0xe4, 0xc1, 0x61, 0x9a, 0x86, 0xed, 0x44, 0x7c, 0x04, 0x3e, 0xf7, 0x5c, 0x87, 0x7b, 0x81, 0x7f, - 0x41, 0x02, 0x07, 0xb7, 0x53, 0xf4, 0xd4, 0x71, 0x47, 0x9e, 0x0f, 0xf4, 0x2c, 0xcd, 0x7b, 0x0a, - 0xdc, 0xb9, 0x88, 0x65, 0xaf, 0x63, 0xd1, 0xc8, 0xe7, 0xde, 0x14, 0xce, 0x11, 0xbe, 0x78, 0x17, - 0x81, 0xb9, 0x23, 0x98, 0x3a, 0xab, 0xbc, 0xc6, 0xdf, 0x08, 0xe5, 0x1f, 0xcc, 0xc0, 0xe7, 0xf8, - 0x10, 0xe5, 0x27, 0x30, 0x83, 0x89, 0x69, 0xd4, 0x8d, 0x66, 0xb1, 0xfd, 0xde, 0xcb, 0x79, 0x6d, - 0x6b, 0x31, 0xaf, 0xe5, 0x1f, 0x0a, 0xe3, 0xdb, 0xf8, 0x83, 0x28, 0x10, 0x3e, 0x41, 0xbb, 0xb2, - 0x7e, 0x9d, 0xfb, 0xe6, 0xb6, 0xc4, 0xdf, 0xd6, 0xf8, 0xdd, 0x96, 0x32, 0xbf, 0x9d, 0xd7, 0xde, - 0x5f, 0x97, 0x13, 0x3f, 0x0b, 0x81, 0x59, 0xbd, 0xce, 0x7d, 0x12, 0x8b, 0x88, 0xe8, 0x8c, 0x3b, - 0x43, 0x30, 0x73, 0xcb, 0xd1, 0xbb, 0xc2, 0xf8, 0x36, 0xfe, 0x20, 0x0a, 0x84, 0x8f, 0x11, 0xa2, - 0xf0, 0x3c, 0x02, 0xc6, 0x7b, 0xa4, 0x63, 0xee, 0x48, 0x0a, 0xd6, 0x14, 0x44, 0x12, 0x0f, 0xc9, - 0xa0, 0x70, 0x1d, 0xed, 0xcc, 0x80, 0xf6, 0xcd, 0xbc, 0x44, 0x5f, 0xd5, 0xe8, 0x9d, 0x27, 
0x40, - 0xfb, 0x44, 0x7a, 0xf0, 0x37, 0x68, 0x27, 0x62, 0x40, 0xcd, 0x2b, 0x75, 0xa3, 0x59, 0x3a, 0xfe, - 0xd0, 0x4a, 0x8f, 0x8e, 0xb5, 0xfc, 0x9f, 0xad, 0xd9, 0x91, 0xd5, 0x63, 0x40, 0x3b, 0xfe, 0xb3, - 0x20, 0x55, 0x12, 0x16, 0x22, 0x15, 0xf0, 0x08, 0x55, 0xbc, 0x69, 0x08, 0x94, 0x05, 0xbe, 0xa8, - 0xb5, 0xf0, 0x98, 0xbb, 0x97, 0x52, 0xbd, 0xb1, 0x98, 0xd7, 0x2a, 0x9d, 0x15, 0x0d, 0x72, 0x4e, - 0x15, 0x7f, 0x82, 0x8a, 0x2c, 0x88, 0xa8, 0x0b, 0x9d, 0x53, 0x66, 0x16, 0xea, 0xb9, 0x66, 0xb1, - 0xbd, 0xb7, 0x98, 0xd7, 0x8a, 0xdd, 0xd8, 0x48, 0x52, 0x3f, 0xb6, 0x51, 0x51, 0xa4, 0xd7, 0x1a, - 0x82, 0xcf, 0xcd, 0x8a, 0xac, 0xc3, 0x35, 0x9d, 0x7d, 0xb1, 0x17, 0x3b, 0x48, 0x8a, 0xc1, 0x4f, - 0x51, 0x31, 0xe8, 0xff, 0x08, 0x2e, 0x27, 0xf0, 0xcc, 0x2c, 0xca, 0x0d, 0x7c, 0x66, 0xbd, 0xbb, - 0xa3, 0xac, 0xc7, 0x31, 0x09, 0x28, 0xf8, 0x2e, 0xa8, 0x94, 0x12, 0x23, 0x49, 0x45, 0xf1, 0x08, - 0x95, 0x29, 0xb0, 0x30, 0xf0, 0x19, 0x74, 0xb9, 0xc3, 0x23, 0x66, 0x22, 0x19, 0xe6, 0x30, 0x13, - 0x26, 0x39, 0x3c, 0x69, 0x24, 0xd1, 0x37, 0x22, 0x90, 0xe2, 0xb4, 0xf1, 0x62, 0x5e, 0x2b, 0x93, - 0x25, 0x1d, 0xb2, 0xa2, 0x8b, 0x1d, 0xb4, 0xa7, 0x4f, 0x83, 0x4a, 0xc4, 0x2c, 0xc9, 0x40, 0xcd, - 0xb5, 0x81, 0x74, 0xe7, 0x58, 0x3d, 0x7f, 0xec, 0x07, 0x3f, 0xf9, 0xed, 0x6b, 0x8b, 0x79, 0x6d, - 0x8f, 0x64, 0x25, 0xc8, 0xb2, 0x22, 0x1e, 0xa4, 0x9b, 0xd1, 0x31, 0xae, 0x5e, 0x32, 0xc6, 0xd2, - 0x46, 0x74, 0x90, 0x15, 0x4d, 0xfc, 0x8b, 0x81, 0x4c, 0x1d, 0x97, 0x80, 0x0b, 0xde, 0x0c, 0x06, - 0xdf, 0x79, 0x53, 0x60, 0xdc, 0x99, 0x86, 0xe6, 0x9e, 0x0c, 0x68, 0x6f, 0x56, 0xbd, 0x47, 0x9e, - 0x4b, 0x03, 0xc1, 0x6d, 0xd7, 0xf5, 0x31, 0x30, 0xc9, 0x1a, 0x61, 0xb2, 0x36, 0x24, 0x0e, 0x50, - 0x59, 0x76, 0x65, 0x9a, 0x44, 0xf9, 0xbf, 0x25, 0x11, 0x37, 0x7d, 0xb9, 0xbb, 0x24, 0x47, 0x56, - 0xe4, 0xf1, 0x73, 0x54, 0x72, 0x7c, 0x3f, 0xe0, 0xb2, 0x6b, 0x98, 0xb9, 0x5f, 0xcf, 0x35, 0x4b, - 0xc7, 0xf7, 0x36, 0x39, 0x97, 0x72, 0xd2, 0x59, 0xad, 0x94, 0xfc, 0xc0, 0xe7, 0xf4, 0xac, 0x7d, - 0x5d, 0x07, 0x2e, 0x65, 0x3c, 0x24, 0x1b, 0xe3, 0xe0, 0x2b, 0x54, 0x59, 0x65, 0xe1, 0x0a, 0xca, - 0x8d, 0xe1, 0x4c, 0x8d, 0x4b, 0x22, 0x3e, 0xf1, 0x0d, 0x94, 0x9f, 0x39, 0x93, 0x08, 0xd4, 0x48, - 0x24, 0x6a, 0x71, 0x6f, 0xfb, 0xae, 0xd1, 0x78, 0x61, 0xa0, 0xa2, 0x0c, 0xfe, 0xd0, 0x63, 0x1c, - 0xff, 0x80, 0x0a, 0x62, 0xf7, 0x03, 0x87, 0x3b, 0x92, 0x5e, 0x3a, 0xb6, 0x36, 0xab, 0x95, 0x60, - 0x3f, 0x02, 0xee, 0xb4, 0x2b, 0x3a, 0xe3, 0x42, 0x6c, 0x21, 0x89, 0x22, 0x3e, 0x41, 0x79, 0x8f, - 0xc3, 0x94, 0x99, 0xdb, 0xb2, 0x30, 0x1f, 0x6d, 0x5c, 0x98, 0xf6, 0x5e, 0x3c, 0x75, 0x3b, 0x82, - 0x4f, 0x94, 0x4c, 0xe3, 0x37, 0x03, 0x95, 0xbf, 0xa6, 0x41, 0x14, 0x12, 0x50, 0xa3, 0x84, 0xe1, - 0x0f, 0x50, 0x7e, 0x28, 0x2c, 0xfa, 0xae, 0x48, 0x78, 0x0a, 0xa6, 0x7c, 0x62, 0x34, 0xd1, 0x98, - 0x21, 0x73, 0xd1, 0xa3, 0x29, 0x91, 0x21, 0xa9, 0x1f, 0xdf, 0x11, 0xdd, 0xa9, 0x16, 0x27, 0xce, - 0x14, 0x98, 0x99, 0x93, 0x04, 0xdd, 0x73, 0x19, 0x07, 0x59, 0xc6, 0x35, 0x7e, 0xcf, 0xa1, 0xfd, - 0x95, 0x71, 0x83, 0x0f, 0x51, 0x21, 0x06, 0xe9, 0x0c, 0x93, 0x7a, 0xc5, 0x5a, 0x24, 0x41, 0x88, - 0xa9, 0xe8, 0x0b, 0xa9, 0xd0, 0x71, 0xf5, 0x9f, 0x4b, 0xa7, 0xe2, 0x49, 0xec, 0x20, 0x29, 0x46, - 0xdc, 0x24, 0x62, 0xa1, 0xaf, 0xaa, 0x64, 0xfe, 0x0b, 0x2c, 0x91, 0x1e, 0xdc, 0x46, 0xb9, 0xc8, - 0x1b, 0xe8, 0x8b, 0xe9, 0x96, 0x06, 0xe4, 0x7a, 0x9b, 0xde, 0x8a, 0x82, 0x2c, 0x36, 0xe1, 0x84, - 0x9e, 0xac, 0xa8, 0xbe, 0xb3, 0x92, 0x4d, 0xb4, 0x4e, 0x3b, 0xaa, 0xd2, 0x09, 0x42, 0xdc, 0x88, - 0x4e, 0xe8, 0x3d, 0x01, 0xca, 0xbc, 0xc0, 0x97, 0x37, 0x58, 0xe6, 0x46, 0x6c, 0x9d, 0x76, 0xb4, - 0x87, 0x64, 0x50, 
0xb8, 0x85, 0xf6, 0xe3, 0x22, 0xc4, 0xc4, 0x5d, 0x49, 0xbc, 0xa9, 0x89, 0xfb, - 0x64, 0xd9, 0x4d, 0x56, 0xf1, 0xf8, 0x73, 0x54, 0x62, 0x51, 0x3f, 0x29, 0x76, 0x41, 0xd2, 0x93, - 0x76, 0xea, 0xa6, 0x2e, 0x92, 0xc5, 0x35, 0xfe, 0x31, 0xd0, 0x95, 0xd3, 0x60, 0xe2, 0xb9, 0x67, - 0xf8, 0xe9, 0xb9, 0x5e, 0xb8, 0xb5, 0x59, 0x2f, 0xa8, 0x9f, 0x2e, 0xbb, 0x21, 0xd9, 0x68, 0x6a, - 0xcb, 0xf4, 0x43, 0x17, 0xe5, 0x69, 0x34, 0x81, 0xb8, 0x1f, 0xac, 0x4d, 0xfa, 0x41, 0x25, 0x47, - 0xa2, 0x09, 0xa4, 0x87, 0x5b, 0xac, 0x18, 0x51, 0x5a, 0xf8, 0x0e, 0x42, 0xc1, 0xd4, 0xe3, 0x72, - 0x52, 0xc5, 0x87, 0xf5, 0xa6, 0x4c, 0x21, 0xb1, 0xa6, 0xaf, 0x96, 0x0c, 0xb4, 0xf1, 0x87, 0x81, - 0x90, 0x52, 0xff, 0x1f, 0x46, 0xc1, 0xe3, 0xe5, 0x51, 0xf0, 0xf1, 0xe6, 0x5b, 0x5f, 0x33, 0x0b, - 0x5e, 0xe4, 0xe2, 0xec, 0x45, 0x35, 0x2e, 0xf9, 0x66, 0xac, 0xa1, 0xbc, 0x78, 0x5a, 0xc4, 0xc3, - 0xa0, 0x28, 0x90, 0xe2, 0xd9, 0xc1, 0x88, 0xb2, 0x63, 0x0b, 0x21, 0xf1, 0x21, 0x4f, 0x74, 0x5c, - 0xd4, 0xb2, 0x28, 0x6a, 0x2f, 0xb1, 0x92, 0x0c, 0x42, 0x08, 0x8a, 0x87, 0x1b, 0x33, 0x77, 0x52, - 0x41, 0xf1, 0x9e, 0x63, 0x44, 0xd9, 0xb1, 0x9b, 0x1d, 0x41, 0x79, 0x59, 0x83, 0xe3, 0x4d, 0x6a, - 0xb0, 0x3c, 0xee, 0xd2, 0x71, 0x70, 0xe1, 0xe8, 0xb2, 0x10, 0x4a, 0x66, 0x03, 0x33, 0xaf, 0xa4, - 0x59, 0x27, 0xc3, 0x83, 0x91, 0x0c, 0x02, 0x7f, 0x89, 0xf6, 0xfd, 0xc0, 0x8f, 0xa5, 0x7a, 0xe4, - 0x21, 0x33, 0x77, 0x25, 0xe9, 0xba, 0x68, 0xb9, 0x93, 0x65, 0x17, 0x59, 0xc5, 0xae, 0x9c, 0xbc, - 0xc2, 0xc6, 0x27, 0xaf, 0xdd, 0x7c, 0xf9, 0xa6, 0xba, 0xf5, 0xea, 0x4d, 0x75, 0xeb, 0xf5, 0x9b, - 0xea, 0xd6, 0xcf, 0x8b, 0xaa, 0xf1, 0x72, 0x51, 0x35, 0x5e, 0x2d, 0xaa, 0xc6, 0xeb, 0x45, 0xd5, - 0xf8, 0x73, 0x51, 0x35, 0x7e, 0xfd, 0xab, 0xba, 0xf5, 0xfd, 0xf6, 0xec, 0xe8, 0xdf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x33, 0x6e, 0xcf, 0xc7, 0x82, 0x0d, 0x00, 0x00, + // 1287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0xc6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x99, 0xf6, 0xfb, 0xed, 0x92, 0x83, 0x6d, 0x8c, + 0x84, 0x0c, 0x84, 0xdd, 0x26, 0x14, 0x5a, 0x55, 0x02, 0xc9, 0xa6, 0xa5, 0x58, 0xb4, 0x49, 0x34, + 0xc6, 0x3d, 0x20, 0x0e, 0x5d, 0xaf, 0x5f, 0xec, 0xc5, 0xf6, 0xec, 0x76, 0x67, 0xd6, 0x28, 0x37, + 0xfe, 0x01, 0x24, 0xee, 0xfc, 0x17, 0xdc, 0x10, 0x27, 0x6e, 0x39, 0xf6, 0xd8, 0x93, 0x45, 0x0c, + 0x7f, 0x45, 0x0e, 0x08, 0xcd, 0xec, 0xec, 0x0f, 0x3b, 0xb1, 0xea, 0x70, 0xe0, 0xb6, 0xf3, 0xde, + 0xe7, 0xf3, 0x79, 0x6f, 0xdf, 0xbe, 0xf7, 0x66, 0xd1, 0x57, 0xc3, 0x07, 0xcc, 0x70, 0x5c, 0x73, + 0x18, 0x74, 0xc1, 0xa7, 0xc0, 0x81, 0x99, 0x13, 0xa0, 0x3d, 0xd7, 0x37, 0x95, 0xc3, 0xf2, 0x1c, + 0x06, 0xfe, 0x04, 0x7c, 0xd3, 0x1b, 0xf6, 0xe5, 0xc9, 0xb4, 0x82, 0x9e, 0xc3, 0xcd, 0xc9, 0xbe, + 0xd9, 0x07, 0x0a, 0xbe, 0xc5, 0xa1, 0x67, 0x78, 0xbe, 0xcb, 0x5d, 0x5c, 0x0b, 0x39, 0x46, 0xcc, + 0x31, 0xbc, 0x61, 0x5f, 0x9e, 0x0c, 0xc9, 0x31, 0x26, 0xfb, 0xbb, 0x1f, 0xf6, 0x1d, 0x3e, 0x08, + 0xba, 0x86, 0xed, 0x8e, 0xcd, 0xbe, 0xdb, 0x77, 0x4d, 0x49, 0xed, 0x06, 0x27, 0xf2, 0x24, 0x0f, + 0xf2, 0x29, 0x94, 0xdc, 0xdd, 0x4b, 0xd2, 0x30, 0xad, 0x80, 0x0f, 0x80, 0x72, 0xc7, 0xb6, 0xb8, + 0xe3, 0xd2, 0x2b, 0x12, 0xd8, 0xbd, 0x97, 0xa0, 0xc7, 0x96, 0x3d, 0x70, 0x28, 0xf8, 0xa7, 0x49, + 0xde, 0x63, 0xe0, 0xd6, 0x55, 0x2c, 0x73, 0x19, 0xcb, 0x0f, 0x28, 0x77, 0xc6, 0x70, 0x89, 0xf0, + 0xc9, 0x9b, 0x08, 0xcc, 0x1e, 0xc0, 0xd8, 0x5a, 0xe4, 0xd5, 0xfe, 0x42, 0x28, 0xfb, 0x78, 0x02, + 0x94, 0xe3, 0x3d, 0x94, 0x1d, 0xc1, 0x04, 0x46, 0xba, 0x56, 0xd5, 0xea, 0xf9, 0xe6, 0xff, 0xcf, + 0xa6, 0x95, 0xb5, 0xd9, 
0xb4, 0x92, 0x7d, 0x2a, 0x8c, 0x17, 0xd1, 0x03, 0x09, 0x41, 0xf8, 0x10, + 0x6d, 0xca, 0xfa, 0xb5, 0x1e, 0xe9, 0xeb, 0x12, 0x7f, 0x4f, 0xe1, 0x37, 0x1b, 0xa1, 0xf9, 0x62, + 0x5a, 0x79, 0x7b, 0x59, 0x4e, 0xfc, 0xd4, 0x03, 0x66, 0x74, 0x5a, 0x8f, 0x48, 0x24, 0x22, 0xa2, + 0x33, 0x6e, 0xf5, 0x41, 0xcf, 0xcc, 0x47, 0x6f, 0x0b, 0xe3, 0x45, 0xf4, 0x40, 0x42, 0x10, 0x3e, + 0x40, 0xc8, 0x87, 0x97, 0x01, 0x30, 0xde, 0x21, 0x2d, 0x7d, 0x43, 0x52, 0xb0, 0xa2, 0x20, 0x12, + 0x7b, 0x48, 0x0a, 0x85, 0xab, 0x68, 0x63, 0x02, 0x7e, 0x57, 0xcf, 0x4a, 0xf4, 0x4d, 0x85, 0xde, + 0x78, 0x0e, 0x7e, 0x97, 0x48, 0x0f, 0xfe, 0x12, 0x6d, 0x04, 0x0c, 0x7c, 0xfd, 0x46, 0x55, 0xab, + 0x17, 0x0e, 0xde, 0x35, 0x92, 0xd6, 0x31, 0xe6, 0xbf, 0xb3, 0x31, 0xd9, 0x37, 0x3a, 0x0c, 0xfc, + 0x16, 0x3d, 0x71, 0x13, 0x25, 0x61, 0x21, 0x52, 0x01, 0x0f, 0x50, 0xc9, 0x19, 0x7b, 0xe0, 0x33, + 0x97, 0x8a, 0x5a, 0x0b, 0x8f, 0xbe, 0x79, 0x2d, 0xd5, 0xdb, 0xb3, 0x69, 0xa5, 0xd4, 0x5a, 0xd0, + 0x20, 0x97, 0x54, 0xf1, 0x07, 0x28, 0xcf, 0xdc, 0xc0, 0xb7, 0xa1, 0x75, 0xcc, 0xf4, 0x5c, 0x35, + 0x53, 0xcf, 0x37, 0xb7, 0x66, 0xd3, 0x4a, 0xbe, 0x1d, 0x19, 0x49, 0xe2, 0xc7, 0x26, 0xca, 0x8b, + 0xf4, 0x1a, 0x7d, 0xa0, 0x5c, 0x2f, 0xc9, 0x3a, 0xec, 0xa8, 0xec, 0xf3, 0x9d, 0xc8, 0x41, 0x12, + 0x0c, 0x7e, 0x81, 0xf2, 0x6e, 0xf7, 0x3b, 0xb0, 0x39, 0x81, 0x13, 0x3d, 0x2f, 0x5f, 0xe0, 0x23, + 0xe3, 0xcd, 0x13, 0x65, 0x1c, 0x45, 0x24, 0xf0, 0x81, 0xda, 0x10, 0xa6, 0x14, 0x1b, 0x49, 0x22, + 0x8a, 0x07, 0xa8, 0xe8, 0x03, 0xf3, 0x5c, 0xca, 0xa0, 0xcd, 0x2d, 0x1e, 0x30, 0x1d, 0xc9, 0x30, + 0x7b, 0xa9, 0x30, 0x71, 0xf3, 0x24, 0x91, 0xc4, 0xdc, 0x88, 0x40, 0x21, 0xa7, 0x89, 0x67, 0xd3, + 0x4a, 0x91, 0xcc, 0xe9, 0x90, 0x05, 0x5d, 0x6c, 0xa1, 0x2d, 0xd5, 0x0d, 0x61, 0x22, 0x7a, 0x41, + 0x06, 0xaa, 0x2f, 0x0d, 0xa4, 0x26, 0xc7, 0xe8, 0xd0, 0x21, 0x75, 0xbf, 0xa7, 0xcd, 0x9d, 0xd9, + 0xb4, 0xb2, 0x45, 0xd2, 0x12, 0x64, 0x5e, 0x11, 0xf7, 0x92, 0x97, 0x51, 0x31, 0x6e, 0x5e, 0x33, + 0xc6, 0xdc, 0x8b, 0xa8, 0x20, 0x0b, 0x9a, 0xf8, 0x47, 0x0d, 0xe9, 0x2a, 0x2e, 0x01, 0x1b, 0x9c, + 0x09, 0xf4, 0xbe, 0x76, 0xc6, 0xc0, 0xb8, 0x35, 0xf6, 0xf4, 0x2d, 0x19, 0xd0, 0x5c, 0xad, 0x7a, + 0xcf, 0x1c, 0xdb, 0x77, 0x05, 0xb7, 0x59, 0x55, 0x6d, 0xa0, 0x93, 0x25, 0xc2, 0x64, 0x69, 0x48, + 0xec, 0xa2, 0xa2, 0x9c, 0xca, 0x24, 0x89, 0xe2, 0xbf, 0x4b, 0x22, 0x1a, 0xfa, 0x62, 0x7b, 0x4e, + 0x8e, 0x2c, 0xc8, 0xe3, 0x97, 0xa8, 0x60, 0x51, 0xea, 0x72, 0x39, 0x35, 0x4c, 0xdf, 0xae, 0x66, + 0xea, 0x85, 0x83, 0x87, 0xab, 0xf4, 0xa5, 0xdc, 0x74, 0x46, 0x23, 0x21, 0x3f, 0xa6, 0xdc, 0x3f, + 0x6d, 0xde, 0x52, 0x81, 0x0b, 0x29, 0x0f, 0x49, 0xc7, 0xd8, 0xfd, 0x0c, 0x95, 0x16, 0x59, 0xb8, + 0x84, 0x32, 0x43, 0x38, 0x0d, 0xd7, 0x25, 0x11, 0x8f, 0xf8, 0x36, 0xca, 0x4e, 0xac, 0x51, 0x00, + 0xe1, 0x4a, 0x24, 0xe1, 0xe1, 0xe1, 0xfa, 0x03, 0xad, 0xf6, 0xab, 0x86, 0xf2, 0x32, 0xf8, 0x53, + 0x87, 0x71, 0xfc, 0x2d, 0xca, 0x89, 0xb7, 0xef, 0x59, 0xdc, 0x92, 0xf4, 0xc2, 0x81, 0xb1, 0x5a, + 0xad, 0x04, 0xfb, 0x19, 0x70, 0xab, 0x59, 0x52, 0x19, 0xe7, 0x22, 0x0b, 0x89, 0x15, 0xf1, 0x21, + 0xca, 0x3a, 0x1c, 0xc6, 0x4c, 0x5f, 0x97, 0x85, 0x79, 0x6f, 0xe5, 0xc2, 0x34, 0xb7, 0xa2, 0xad, + 0xdb, 0x12, 0x7c, 0x12, 0xca, 0xd4, 0x7e, 0xd6, 0x50, 0xf1, 0x89, 0xef, 0x06, 0x1e, 0x81, 0x70, + 0x95, 0x30, 0xfc, 0x0e, 0xca, 0xf6, 0x85, 0x45, 0xdd, 0x15, 0x31, 0x2f, 0x84, 0x85, 0x3e, 0xb1, + 0x9a, 0xfc, 0x88, 0x21, 0x73, 0x51, 0xab, 0x29, 0x96, 0x21, 0x89, 0x1f, 0xdf, 0x17, 0xd3, 0x19, + 0x1e, 0x0e, 0xad, 0x31, 0x30, 0x3d, 0x23, 0x09, 0x6a, 0xe6, 0x52, 0x0e, 0x32, 0x8f, 0xab, 0xfd, + 0x92, 0x41, 0xdb, 0x0b, 0xeb, 0x06, 0xef, 0xa1, 
0x5c, 0x04, 0x52, 0x19, 0xc6, 0xf5, 0x8a, 0xb4, + 0x48, 0x8c, 0x10, 0x5b, 0x91, 0x0a, 0x29, 0xcf, 0xb2, 0xd5, 0x97, 0x4b, 0xb6, 0xe2, 0x61, 0xe4, + 0x20, 0x09, 0x46, 0xdc, 0x24, 0xe2, 0xa0, 0xae, 0xaa, 0x78, 0xff, 0x0b, 0x2c, 0x91, 0x1e, 0xdc, + 0x44, 0x99, 0xc0, 0xe9, 0xa9, 0x8b, 0xe9, 0xae, 0x02, 0x64, 0x3a, 0xab, 0xde, 0x8a, 0x82, 0x2c, + 0x5e, 0xc2, 0xf2, 0x1c, 0x59, 0x51, 0x75, 0x67, 0xc5, 0x2f, 0xd1, 0x38, 0x6e, 0x85, 0x95, 0x8e, + 0x11, 0xe2, 0x46, 0xb4, 0x3c, 0xe7, 0x39, 0xf8, 0xcc, 0x71, 0xa9, 0xbc, 0xc1, 0x52, 0x37, 0x62, + 0xe3, 0xb8, 0xa5, 0x3c, 0x24, 0x85, 0xc2, 0x0d, 0xb4, 0x1d, 0x15, 0x21, 0x22, 0x6e, 0x4a, 0xe2, + 0x1d, 0x45, 0xdc, 0x26, 0xf3, 0x6e, 0xb2, 0x88, 0xc7, 0x1f, 0xa3, 0x02, 0x0b, 0xba, 0x71, 0xb1, + 0x73, 0x92, 0x1e, 0x8f, 0x53, 0x3b, 0x71, 0x91, 0x34, 0xae, 0xf6, 0xfb, 0x3a, 0xba, 0x71, 0xec, + 0x8e, 0x1c, 0xfb, 0x14, 0xbf, 0xb8, 0x34, 0x0b, 0x77, 0x57, 0x9b, 0x85, 0xf0, 0xa3, 0xcb, 0x69, + 0x88, 0x5f, 0x34, 0xb1, 0xa5, 0xe6, 0xa1, 0x8d, 0xb2, 0x7e, 0x30, 0x82, 0x68, 0x1e, 0x8c, 0x55, + 0xe6, 0x21, 0x4c, 0x8e, 0x04, 0x23, 0x48, 0x9a, 0x5b, 0x9c, 0x18, 0x09, 0xb5, 0xf0, 0x7d, 0x84, + 0xdc, 0xb1, 0xc3, 0xe5, 0xa6, 0x8a, 0x9a, 0xf5, 0x8e, 0x4c, 0x21, 0xb6, 0x26, 0x7f, 0x2d, 0x29, + 0x28, 0x7e, 0x82, 0x76, 0xc4, 0xe9, 0x99, 0x45, 0xad, 0x3e, 0xf4, 0xbe, 0x70, 0x60, 0xd4, 0x63, + 0xb2, 0x51, 0x72, 0xcd, 0xb7, 0x54, 0xa4, 0x9d, 0xa3, 0x45, 0x00, 0xb9, 0xcc, 0xa9, 0xfd, 0xa6, + 0x21, 0x14, 0xa6, 0xf9, 0x1f, 0xec, 0x94, 0xa3, 0xf9, 0x9d, 0xf2, 0xfe, 0xea, 0x35, 0x5c, 0xb2, + 0x54, 0xfe, 0xce, 0x44, 0xd9, 0x8b, 0xb2, 0x5e, 0xf3, 0xe7, 0xb3, 0x82, 0xb2, 0xe2, 0x1f, 0x25, + 0xda, 0x2a, 0x79, 0x81, 0x14, 0xff, 0x2f, 0x8c, 0x84, 0x76, 0x6c, 0x20, 0x24, 0x1e, 0xe4, 0x68, + 0x44, 0x5f, 0xa7, 0x28, 0xbe, 0x4e, 0x27, 0xb6, 0x92, 0x14, 0x42, 0x08, 0x8a, 0x3f, 0x40, 0xf1, + 0x21, 0x62, 0x41, 0xf1, 0x63, 0xc8, 0x48, 0x68, 0xc7, 0x76, 0x7a, 0x97, 0x65, 0x65, 0x0d, 0x0e, + 0x56, 0xa9, 0xc1, 0xfc, 0xde, 0x4c, 0xf6, 0xca, 0x95, 0x3b, 0xd0, 0x40, 0x28, 0x5e, 0x32, 0x4c, + 0xbf, 0x91, 0x64, 0x1d, 0x6f, 0x21, 0x46, 0x52, 0x08, 0xfc, 0x29, 0xda, 0xa6, 0x2e, 0x8d, 0xa4, + 0x3a, 0xe4, 0x29, 0xd3, 0x37, 0x25, 0xe9, 0x96, 0x98, 0xdd, 0xc3, 0x79, 0x17, 0x59, 0xc4, 0x2e, + 0xb4, 0x70, 0x6e, 0xf5, 0x16, 0xfe, 0xfc, 0xaa, 0x16, 0xce, 0xcb, 0x16, 0xfe, 0xdf, 0xaa, 0xed, + 0xdb, 0xac, 0x9f, 0x9d, 0x97, 0xd7, 0x5e, 0x9d, 0x97, 0xd7, 0x5e, 0x9f, 0x97, 0xd7, 0x7e, 0x98, + 0x95, 0xb5, 0xb3, 0x59, 0x59, 0x7b, 0x35, 0x2b, 0x6b, 0xaf, 0x67, 0x65, 0xed, 0x8f, 0x59, 0x59, + 0xfb, 0xe9, 0xcf, 0xf2, 0xda, 0x37, 0xeb, 0x93, 0xfd, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6a, + 0x8e, 0x8a, 0xae, 0x10, 0x0e, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { @@ -696,6 +699,14 @@ func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -799,6 +810,16 @@ func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OmitManagedFields != nil { + i-- + if *m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -1032,6 +1053,7 @@ func (m *Policy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 return n } @@ -1102,6 +1124,9 @@ func (m *PolicyRule) Size() (n 
int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.OmitManagedFields != nil { + n += 2 + } return n } @@ -1204,6 +1229,7 @@ func (this *Policy) String() string { `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + repeatedStringForRules + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + fmt.Sprintf("%v", this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -1242,6 +1268,7 @@ func (this *PolicyRule) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + valueToStringGenerated(this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -2631,6 +2658,26 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OmitManagedFields = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3056,6 +3103,27 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OmitManagedFields = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index d142ee7e4..0c2050650 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -190,6 +190,15 @@ message Policy { // be specified per rule in which case the union of both are omitted. // +optional repeated string omitStages = 3; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. + // +optional + optional bool omitManagedFields = 4; } // PolicyList is a list of audit Policies. @@ -245,5 +254,16 @@ message PolicyRule { // An empty list means no restrictions will apply. // +optional repeated string omitStages = 8; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. 
+ // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fileds should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + optional bool omitManagedFields = 9; } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go index dace73ed9..3f70ebaa5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go @@ -166,6 +166,15 @@ type Policy struct { // be specified per rule in which case the union of both are omitted. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. + // +optional + OmitManagedFields bool `json:"omitManagedFields,omitempty" protobuf:"varint,4,opt,name=omitManagedFields"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -226,6 +235,17 @@ type PolicyRule struct { // An empty list means no restrictions will apply. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fileds should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + OmitManagedFields *bool `json:"omitManagedFields,omitempty" protobuf:"varint,9,opt,name=omitManagedFields"` } // GroupResources represents resource kinds in an API group. 
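The v1 hunks above introduce the new OmitManagedFields knob twice: as a plain bool default on Policy and as a *bool override on PolicyRule. Below is a minimal, hand-written sketch (not part of the vendored code) of the precedence the doc comments describe, assuming the vendored auditv1 package is importable; the helper name effectiveOmitManagedFields is illustrative only.

package main

import (
	"fmt"

	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

// effectiveOmitManagedFields mirrors the documented precedence: a rule-level
// *bool, when set, overrides the Policy-wide default.
func effectiveOmitManagedFields(p *auditv1.Policy, r *auditv1.PolicyRule) bool {
	if r.OmitManagedFields != nil {
		return *r.OmitManagedFields
	}
	return p.OmitManagedFields
}

func main() {
	keep := false
	policy := auditv1.Policy{OmitManagedFields: true}    // global default: omit managed fields
	rule := auditv1.PolicyRule{OmitManagedFields: &keep} // this rule keeps them

	fmt.Println(effectiveOmitManagedFields(&policy, &rule))                 // false: the rule override wins
	fmt.Println(effectiveOmitManagedFields(&policy, &auditv1.PolicyRule{})) // true: falls back to the policy default
}

The pointer type on PolicyRule is what lets an unset rule fall through to the Policy default, which is also why the generated DeepCopyInto further down gains a nil check for the field.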
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go index 4500bfe31..53cbb0208 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -245,6 +246,7 @@ func autoConvert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conv out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -257,6 +259,7 @@ func autoConvert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conv out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -296,6 +299,7 @@ func autoConvert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.Po out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } @@ -313,6 +317,7 @@ func autoConvert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *Po out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go index 81d126d4e..0b1b0052d 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -277,6 +278,11 @@ func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { *out = make([]Stage, len(*in)) copy(*out, *in) } + if in.OmitManagedFields != nil { + in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go index cce2e603a..dac177e93 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go index 4af2810f0..84ceca0b7 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -261,86 +261,89 @@ func init() { } var fileDescriptor_46c0b2c8ea67b187 = []byte{ - // 1263 bytes of a gzipped FileDescriptorProto + // 1306 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xd6, 0x71, 0x63, 0x8f, 0x1b, 
0xc7, 0x99, 0x56, 0x74, 0x95, 0x83, 0x6d, 0x8c, 0x84, - 0x2c, 0x08, 0xbb, 0x49, 0x14, 0x68, 0x40, 0x02, 0x11, 0xab, 0x15, 0x58, 0x4a, 0x43, 0x78, 0x89, - 0x2b, 0xf1, 0xe7, 0xc0, 0xda, 0x7e, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xce, 0xac, 0xab, 0xdc, 0x38, - 0x70, 0x45, 0xe2, 0xce, 0x87, 0xe0, 0x23, 0x54, 0xdc, 0x72, 0xec, 0xb1, 0x27, 0x8b, 0x98, 0x6f, - 0x91, 0x03, 0x42, 0x33, 0xfb, 0x67, 0xd6, 0x4e, 0x2d, 0x1c, 0x0e, 0xbd, 0xed, 0xbc, 0xf7, 0x7b, - 0xbf, 0xf7, 0xe6, 0xed, 0xfb, 0x33, 0xe4, 0xeb, 0xc1, 0x01, 0x33, 0x6c, 0xd7, 0x1c, 0x04, 0x6d, - 0xf4, 0x1d, 0xe4, 0xc8, 0xcc, 0x31, 0x3a, 0x5d, 0xd7, 0x37, 0x23, 0x85, 0xe5, 0xd9, 0x0c, 0xfd, - 0x31, 0xfa, 0xa6, 0x37, 0xe8, 0xc9, 0x93, 0x69, 0x05, 0x5d, 0x9b, 0x9b, 0xe3, 0x5d, 0x6b, 0xe8, - 0xf5, 0xad, 0x5d, 0xb3, 0x87, 0x0e, 0xfa, 0x16, 0xc7, 0xae, 0xe1, 0xf9, 0x2e, 0x77, 0x69, 0x3d, - 0xb4, 0x34, 0x12, 0x4b, 0xc3, 0x1b, 0xf4, 0xe4, 0xc9, 0x90, 0x96, 0x46, 0x6c, 0xb9, 0xf5, 0x41, - 0xcf, 0xe6, 0xfd, 0xa0, 0x6d, 0x74, 0xdc, 0x91, 0xd9, 0x73, 0x7b, 0xae, 0x29, 0x09, 0xda, 0xc1, - 0xb9, 0x3c, 0xc9, 0x83, 0xfc, 0x0a, 0x89, 0xb7, 0xb6, 0x55, 0x48, 0xa6, 0x15, 0xf0, 0x3e, 0x3a, - 0xdc, 0xee, 0x58, 0xdc, 0x76, 0x1d, 0x73, 0x7c, 0x23, 0x8c, 0xad, 0x7d, 0x85, 0x1e, 0x59, 0x9d, - 0xbe, 0xed, 0xa0, 0x7f, 0xa1, 0xee, 0x30, 0x42, 0x6e, 0xbd, 0xce, 0xca, 0x5c, 0x64, 0xe5, 0x07, - 0x0e, 0xb7, 0x47, 0x78, 0xc3, 0xe0, 0xa3, 0xff, 0x32, 0x60, 0x9d, 0x3e, 0x8e, 0xac, 0x79, 0xbb, - 0xda, 0x1f, 0xf7, 0x48, 0xf6, 0xc9, 0x18, 0x1d, 0x4e, 0x7f, 0x20, 0x39, 0x11, 0x4d, 0xd7, 0xe2, - 0x96, 0xae, 0x55, 0xb5, 0x7a, 0x61, 0x6f, 0xc7, 0x50, 0x29, 0x4c, 0x48, 0x55, 0x16, 0x05, 0xda, - 0x18, 0xef, 0x1a, 0x5f, 0xb5, 0x7f, 0xc4, 0x0e, 0x7f, 0x8a, 0xdc, 0x6a, 0xd0, 0xcb, 0x49, 0x65, - 0x65, 0x3a, 0xa9, 0x10, 0x25, 0x83, 0x84, 0x95, 0x6e, 0x93, 0xec, 0x10, 0xc7, 0x38, 0xd4, 0xef, - 0x54, 0xb5, 0x7a, 0xbe, 0xf1, 0x56, 0x04, 0xce, 0x1e, 0x09, 0xe1, 0x75, 0xfc, 0x01, 0x21, 0x88, - 0x7e, 0x47, 0xf2, 0x22, 0x70, 0xc6, 0xad, 0x91, 0xa7, 0x67, 0x64, 0x40, 0xef, 0x2d, 0x17, 0xd0, - 0x99, 0x3d, 0xc2, 0xc6, 0x66, 0xc4, 0x9e, 0x3f, 0x8b, 0x49, 0x40, 0xf1, 0xd1, 0x63, 0xb2, 0x26, - 0x8b, 0xa0, 0xf9, 0x58, 0x5f, 0x95, 0xc1, 0xec, 0x47, 0xf0, 0xb5, 0xc3, 0x50, 0x7c, 0x3d, 0xa9, - 0xbc, 0xbd, 0x28, 0xa5, 0xfc, 0xc2, 0x43, 0x66, 0xb4, 0x9a, 0x8f, 0x21, 0x26, 0x11, 0x57, 0x63, - 0xdc, 0xea, 0xa1, 0x9e, 0x9d, 0xbd, 0xda, 0xa9, 0x10, 0x5e, 0xc7, 0x1f, 0x10, 0x82, 0xe8, 0x1e, - 0x21, 0x3e, 0xfe, 0x14, 0x20, 0xe3, 0x2d, 0x68, 0xea, 0x77, 0xa5, 0x49, 0x92, 0x3a, 0x48, 0x34, - 0x90, 0x42, 0xd1, 0x2a, 0x59, 0x1d, 0xa3, 0xdf, 0xd6, 0xd7, 0x24, 0xfa, 0x5e, 0x84, 0x5e, 0x7d, - 0x86, 0x7e, 0x1b, 0xa4, 0x86, 0x7e, 0x49, 0x56, 0x03, 0x86, 0xbe, 0x9e, 0x93, 0xb9, 0x7a, 0x37, - 0x95, 0x2b, 0x63, 0xb6, 0x4c, 0x45, 0x8e, 0x5a, 0x0c, 0xfd, 0xa6, 0x73, 0xee, 0x2a, 0x26, 0x21, - 0x01, 0xc9, 0x40, 0xfb, 0xa4, 0x64, 0x8f, 0x3c, 0xf4, 0x99, 0xeb, 0x88, 0x52, 0x11, 0x1a, 0x3d, - 0x7f, 0x2b, 0xd6, 0x07, 0xd3, 0x49, 0xa5, 0xd4, 0x9c, 0xe3, 0x80, 0x1b, 0xac, 0xf4, 0x7d, 0x92, - 0x67, 0x6e, 0xe0, 0x77, 0xb0, 0x79, 0xc2, 0x74, 0x52, 0xcd, 0xd4, 0xf3, 0x8d, 0x75, 0xf1, 0xd3, - 0x4e, 0x63, 0x21, 0x28, 0x3d, 0x35, 0x49, 0x5e, 0x84, 0x77, 0xd8, 0x43, 0x87, 0xeb, 0x54, 0xe6, - 0x21, 0xf9, 0xcb, 0xad, 0x58, 0x01, 0x0a, 0x43, 0xcf, 0x49, 0xde, 0x95, 0x85, 0x08, 0x78, 0xae, - 0x17, 0xe4, 0x05, 0x3e, 0x36, 0x96, 0x1d, 0x0b, 0x51, 0x5d, 0x03, 0x9e, 0xa3, 0x8f, 0x4e, 0x07, - 0xc3, 0xc0, 0x12, 0x21, 0x28, 0x6a, 0xda, 0x27, 0x45, 0x1f, 0x99, 0xe7, 0x3a, 0x0c, 0x4f, 0xb9, - 0xc5, 0x03, 0xa6, 0xdf, 0x93, 0xce, 0xb6, 0x97, 0xab, 0xd7, 0xd0, 
0xa6, 0x41, 0xa7, 0x93, 0x4a, - 0x11, 0x66, 0x78, 0x60, 0x8e, 0x97, 0x5a, 0x64, 0x3d, 0xaa, 0x89, 0x30, 0x10, 0x7d, 0x5d, 0x3a, - 0xaa, 0x2f, 0x74, 0x14, 0xb5, 0xbf, 0xd1, 0x72, 0x06, 0x8e, 0xfb, 0xdc, 0x69, 0x6c, 0x4e, 0x27, - 0x95, 0x75, 0x48, 0x53, 0xc0, 0x2c, 0x23, 0xed, 0xaa, 0xcb, 0x44, 0x3e, 0x8a, 0xb7, 0xf4, 0x31, - 0x73, 0x91, 0xc8, 0xc9, 0x1c, 0x27, 0xfd, 0x55, 0x23, 0x7a, 0xe4, 0x17, 0xb0, 0x83, 0xf6, 0x18, - 0xbb, 0x49, 0xa3, 0xea, 0x1b, 0xd2, 0xa1, 0xb9, 0x5c, 0xf6, 0x9e, 0xda, 0x1d, 0xdf, 0x95, 0x2d, - 0x5f, 0x8d, 0x8a, 0x41, 0x87, 0x05, 0xc4, 0xb0, 0xd0, 0x25, 0x75, 0x49, 0x51, 0xf6, 0xa6, 0x0a, - 0xa2, 0xf4, 0xff, 0x82, 0x88, 0x5b, 0xbf, 0x78, 0x3a, 0x43, 0x07, 0x73, 0xf4, 0xf4, 0x39, 0x29, - 0x58, 0x8e, 0xe3, 0x72, 0xd9, 0x3b, 0x4c, 0xdf, 0xac, 0x66, 0xea, 0x85, 0xbd, 0xcf, 0x97, 0xaf, - 0x4e, 0x39, 0xb4, 0x8d, 0x43, 0x45, 0xf1, 0xc4, 0xe1, 0xfe, 0x45, 0xe3, 0x7e, 0xe4, 0xbe, 0x90, - 0xd2, 0x40, 0xda, 0xd3, 0xd6, 0x67, 0xa4, 0x34, 0x6f, 0x45, 0x4b, 0x24, 0x33, 0xc0, 0x0b, 0x39, - 0xf6, 0xf3, 0x20, 0x3e, 0xe9, 0x03, 0x92, 0x1d, 0x5b, 0xc3, 0x00, 0xc3, 0x59, 0x0d, 0xe1, 0xe1, - 0x93, 0x3b, 0x07, 0x5a, 0xed, 0x85, 0x46, 0xf2, 0xd2, 0xf9, 0x91, 0xcd, 0x38, 0xfd, 0xfe, 0xc6, - 0xd6, 0x30, 0x96, 0xcb, 0x98, 0xb0, 0x96, 0x3b, 0xa3, 0x14, 0x45, 0x9c, 0x8b, 0x25, 0xa9, 0x8d, - 0x71, 0x46, 0xb2, 0x36, 0xc7, 0x11, 0xd3, 0xef, 0xc8, 0xf4, 0x98, 0xb7, 0x4c, 0x4f, 0x63, 0x3d, - 0x9e, 0xc3, 0x4d, 0xc1, 0x02, 0x21, 0x59, 0xed, 0x77, 0x8d, 0x14, 0xbf, 0xf0, 0xdd, 0xc0, 0x03, - 0x0c, 0x87, 0x0b, 0xa3, 0xef, 0x90, 0x6c, 0x4f, 0x48, 0xc2, 0x14, 0x28, 0xbb, 0x10, 0x16, 0xea, - 0xc4, 0xb0, 0xf2, 0x63, 0x0b, 0x19, 0x51, 0x34, 0xac, 0x12, 0x1a, 0x50, 0x7a, 0xfa, 0x48, 0x74, - 0x6a, 0x78, 0x38, 0xb6, 0x46, 0xc8, 0xf4, 0x8c, 0x34, 0x88, 0xfa, 0x2f, 0xa5, 0x80, 0x59, 0x5c, - 0xed, 0x97, 0x0c, 0xd9, 0x98, 0x1b, 0x3d, 0x74, 0x9b, 0xe4, 0x62, 0x50, 0x14, 0x61, 0x92, 0xb5, - 0x98, 0x0b, 0x12, 0x84, 0x98, 0x93, 0x8e, 0xa0, 0xf2, 0xac, 0x4e, 0xf4, 0xff, 0xd4, 0x9c, 0x3c, - 0x8e, 0x15, 0xa0, 0x30, 0x62, 0xb7, 0x88, 0x83, 0xdc, 0xb2, 0xa9, 0xdd, 0x22, 0xb0, 0x20, 0x35, - 0xb4, 0x41, 0x32, 0x81, 0xdd, 0x8d, 0x76, 0xe5, 0x4e, 0x04, 0xc8, 0xb4, 0x96, 0xdd, 0x93, 0xc2, - 0x58, 0x6c, 0x3d, 0xcb, 0xb3, 0x9f, 0xa1, 0xcf, 0x6c, 0xd7, 0x89, 0x16, 0x65, 0xb2, 0xf5, 0x0e, - 0x4f, 0x9a, 0x91, 0x06, 0x52, 0x28, 0x7a, 0x48, 0x36, 0xe2, 0x6b, 0xc5, 0x86, 0xe1, 0xba, 0x7c, - 0x18, 0x19, 0x6e, 0xc0, 0xac, 0x1a, 0xe6, 0xf1, 0xf4, 0x43, 0x52, 0x60, 0x41, 0x3b, 0x49, 0x5f, - 0xb8, 0x3f, 0x93, 0x36, 0x39, 0x55, 0x2a, 0x48, 0xe3, 0x6a, 0xff, 0x68, 0xe4, 0xee, 0x89, 0x3b, - 0xb4, 0x3b, 0x17, 0x6f, 0xe0, 0x65, 0xf4, 0x0d, 0xc9, 0xfa, 0xc1, 0x10, 0xe3, 0x3a, 0xdf, 0x5f, - 0xbe, 0xce, 0xc3, 0x10, 0x21, 0x18, 0xa2, 0x2a, 0x5a, 0x71, 0x62, 0x10, 0x32, 0xd2, 0x47, 0x84, - 0xb8, 0x23, 0x9b, 0xcb, 0x69, 0x14, 0x17, 0xe1, 0x43, 0x19, 0x48, 0x22, 0x55, 0xef, 0x93, 0x14, - 0xb4, 0xf6, 0xa7, 0x46, 0x48, 0xc8, 0xfe, 0x06, 0x1a, 0xbd, 0x35, 0xdb, 0xe8, 0x3b, 0xb7, 0x4d, - 0xc0, 0x82, 0x4e, 0x7f, 0x91, 0x89, 0xef, 0x20, 0x72, 0xa2, 0x1e, 0xa0, 0xda, 0x32, 0x0f, 0xd0, - 0x0a, 0xc9, 0x8a, 0xa7, 0x44, 0xdc, 0xea, 0x79, 0x81, 0x14, 0xcf, 0x0c, 0x06, 0xa1, 0x9c, 0x1a, - 0x84, 0x88, 0x0f, 0x39, 0x23, 0xe2, 0xd4, 0x16, 0x45, 0x6a, 0x5b, 0x89, 0x14, 0x52, 0x08, 0x41, - 0x28, 0x1e, 0x6a, 0x4c, 0x5f, 0x55, 0x84, 0xe2, 0xfd, 0xc6, 0x20, 0x94, 0x53, 0x3b, 0x3d, 0x60, - 0xb2, 0x32, 0x13, 0x07, 0xcb, 0x67, 0x62, 0x76, 0xa4, 0xa9, 0x96, 0x7f, 0xed, 0x78, 0x32, 0x08, - 0x49, 0xfa, 0x9f, 0xe9, 0x77, 0x55, 0xec, 0xc9, 0x80, 0x60, 0x90, 0x42, 0xd0, 0x4f, 0xc9, 
0x86, - 0xe3, 0x3a, 0x31, 0x55, 0x0b, 0x8e, 0x98, 0xbe, 0x26, 0x8d, 0xee, 0x8b, 0x26, 0x3c, 0x9e, 0x55, - 0xc1, 0x3c, 0x76, 0xae, 0x0a, 0x73, 0x4b, 0x57, 0x61, 0xc3, 0xb8, 0xbc, 0x2a, 0xaf, 0xbc, 0xbc, - 0x2a, 0xaf, 0xbc, 0xba, 0x2a, 0xaf, 0xfc, 0x3c, 0x2d, 0x6b, 0x97, 0xd3, 0xb2, 0xf6, 0x72, 0x5a, - 0xd6, 0x5e, 0x4d, 0xcb, 0xda, 0x5f, 0xd3, 0xb2, 0xf6, 0xdb, 0xdf, 0xe5, 0x95, 0x6f, 0x73, 0x71, - 0x12, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf8, 0xf6, 0xd0, 0x49, 0x0e, 0x00, 0x00, + 0x14, 0xcf, 0xc6, 0x71, 0xe3, 0x9d, 0x34, 0x4e, 0x32, 0x2d, 0x74, 0xc9, 0xc1, 0x0e, 0x41, 0x42, + 0x11, 0x84, 0xdd, 0xb6, 0x2a, 0xb4, 0x20, 0x81, 0x88, 0x69, 0x29, 0x96, 0xda, 0xb4, 0x4c, 0xea, + 0x4a, 0xfc, 0x39, 0x30, 0xb6, 0x5f, 0xec, 0xc5, 0xf6, 0xec, 0xb2, 0x33, 0xeb, 0x2a, 0x37, 0x0e, + 0xbd, 0x22, 0x71, 0xe7, 0x43, 0xf0, 0x11, 0x10, 0x27, 0x7a, 0xec, 0xb1, 0x27, 0x8b, 0x9a, 0x6f, + 0xd1, 0x13, 0x9a, 0xd9, 0xd9, 0x9d, 0xb5, 0x13, 0x8b, 0x0d, 0x87, 0xde, 0x66, 0xde, 0xfb, 0xbd, + 0xdf, 0x7b, 0xf3, 0xe6, 0xcd, 0x7b, 0x83, 0xbe, 0x1e, 0xdc, 0xe2, 0xae, 0x1f, 0x78, 0x83, 0xb8, + 0x0d, 0x11, 0x03, 0x01, 0xdc, 0x1b, 0x03, 0xeb, 0x06, 0x91, 0xa7, 0x15, 0x34, 0xf4, 0x39, 0x44, + 0x63, 0x88, 0xbc, 0x70, 0xd0, 0x53, 0x3b, 0x8f, 0xc6, 0x5d, 0x5f, 0x78, 0xe3, 0x6b, 0x74, 0x18, + 0xf6, 0xe9, 0x35, 0xaf, 0x07, 0x0c, 0x22, 0x2a, 0xa0, 0xeb, 0x86, 0x51, 0x20, 0x02, 0xbc, 0x97, + 0x58, 0xba, 0x99, 0xa5, 0x1b, 0x0e, 0x7a, 0x6a, 0xe7, 0x2a, 0x4b, 0x37, 0xb5, 0xdc, 0xfe, 0xa0, + 0xe7, 0x8b, 0x7e, 0xdc, 0x76, 0x3b, 0xc1, 0xc8, 0xeb, 0x05, 0xbd, 0xc0, 0x53, 0x04, 0xed, 0xf8, + 0x58, 0xed, 0xd4, 0x46, 0xad, 0x12, 0xe2, 0xed, 0x7d, 0x13, 0x92, 0x47, 0x63, 0xd1, 0x07, 0x26, + 0xfc, 0x0e, 0x15, 0x7e, 0xc0, 0xbc, 0xf1, 0xa9, 0x30, 0xb6, 0x6f, 0x18, 0xf4, 0x88, 0x76, 0xfa, + 0x3e, 0x83, 0xe8, 0xc4, 0x9c, 0x61, 0x04, 0x82, 0x9e, 0x65, 0xe5, 0x2d, 0xb2, 0x8a, 0x62, 0x26, + 0xfc, 0x11, 0x9c, 0x32, 0xf8, 0xe8, 0xbf, 0x0c, 0x78, 0xa7, 0x0f, 0x23, 0x3a, 0x6f, 0xb7, 0xfb, + 0xfb, 0x45, 0x54, 0xbe, 0x33, 0x06, 0x26, 0xf0, 0x0f, 0xa8, 0x22, 0xa3, 0xe9, 0x52, 0x41, 0x1d, + 0x6b, 0xc7, 0xda, 0x5b, 0xbb, 0x7e, 0xd5, 0x35, 0x29, 0xcc, 0x48, 0x4d, 0x16, 0x25, 0xda, 0x1d, + 0x5f, 0x73, 0x1f, 0xb4, 0x7f, 0x84, 0x8e, 0xb8, 0x0f, 0x82, 0x36, 0xf0, 0xb3, 0x49, 0x7d, 0x69, + 0x3a, 0xa9, 0x23, 0x23, 0x23, 0x19, 0x2b, 0xde, 0x47, 0xe5, 0x21, 0x8c, 0x61, 0xe8, 0x2c, 0xef, + 0x58, 0x7b, 0x76, 0xe3, 0x4d, 0x0d, 0x2e, 0xdf, 0x93, 0xc2, 0x57, 0xe9, 0x82, 0x24, 0x20, 0xfc, + 0x1d, 0xb2, 0x65, 0xe0, 0x5c, 0xd0, 0x51, 0xe8, 0x94, 0x54, 0x40, 0xef, 0x15, 0x0b, 0xe8, 0x91, + 0x3f, 0x82, 0xc6, 0x96, 0x66, 0xb7, 0x1f, 0xa5, 0x24, 0xc4, 0xf0, 0xe1, 0x43, 0xb4, 0xaa, 0x8a, + 0xa0, 0x79, 0xdb, 0x59, 0x51, 0xc1, 0xdc, 0xd0, 0xf0, 0xd5, 0x83, 0x44, 0xfc, 0x6a, 0x52, 0x7f, + 0x7b, 0x51, 0x4a, 0xc5, 0x49, 0x08, 0xdc, 0x6d, 0x35, 0x6f, 0x93, 0x94, 0x44, 0x1e, 0x8d, 0x0b, + 0xda, 0x03, 0xa7, 0x3c, 0x7b, 0xb4, 0x23, 0x29, 0x7c, 0x95, 0x2e, 0x48, 0x02, 0xc2, 0xd7, 0x11, + 0x8a, 0xe0, 0xa7, 0x18, 0xb8, 0x68, 0x91, 0xa6, 0x73, 0x41, 0x99, 0x64, 0xa9, 0x23, 0x99, 0x86, + 0xe4, 0x50, 0x78, 0x07, 0xad, 0x8c, 0x21, 0x6a, 0x3b, 0xab, 0x0a, 0x7d, 0x51, 0xa3, 0x57, 0x1e, + 0x43, 0xd4, 0x26, 0x4a, 0x83, 0xbf, 0x42, 0x2b, 0x31, 0x87, 0xc8, 0xa9, 0xa8, 0x5c, 0xbd, 0x9b, + 0xcb, 0x95, 0x3b, 0x5b, 0xa6, 0x32, 0x47, 0x2d, 0x0e, 0x51, 0x93, 0x1d, 0x07, 0x86, 0x49, 0x4a, + 0x88, 0x62, 0xc0, 0x7d, 0xb4, 0xe9, 0x8f, 0x42, 0x88, 0x78, 0xc0, 0x64, 0xa9, 0x48, 0x8d, 0x63, + 0x9f, 0x8b, 0xf5, 0xf2, 0x74, 0x52, 0xdf, 0x6c, 0xce, 0x71, 0x90, 0x53, 0xac, 0xf8, 0x7d, 0x64, + 0xf3, 0x20, 0x8e, 0x3a, 
0xd0, 0x7c, 0xc8, 0x1d, 0xb4, 0x53, 0xda, 0xb3, 0x1b, 0xeb, 0xf2, 0xd2, + 0x8e, 0x52, 0x21, 0x31, 0x7a, 0xec, 0x21, 0x5b, 0x86, 0x77, 0xd0, 0x03, 0x26, 0x1c, 0xac, 0xf2, + 0x90, 0xdd, 0x72, 0x2b, 0x55, 0x10, 0x83, 0xc1, 0xc7, 0xc8, 0x0e, 0x54, 0x21, 0x12, 0x38, 0x76, + 0xd6, 0xd4, 0x01, 0x3e, 0x76, 0x8b, 0xb6, 0x05, 0x5d, 0xd7, 0x04, 0x8e, 0x21, 0x02, 0xd6, 0x81, + 0x24, 0xb0, 0x4c, 0x48, 0x0c, 0x35, 0xee, 0xa3, 0x6a, 0x04, 0x3c, 0x0c, 0x18, 0x87, 0x23, 0x41, + 0x45, 0xcc, 0x9d, 0x8b, 0xca, 0xd9, 0x7e, 0xb1, 0x7a, 0x4d, 0x6c, 0x1a, 0x78, 0x3a, 0xa9, 0x57, + 0xc9, 0x0c, 0x0f, 0x99, 0xe3, 0xc5, 0x14, 0xad, 0xeb, 0x9a, 0x48, 0x02, 0x71, 0xd6, 0x95, 0xa3, + 0xbd, 0x85, 0x8e, 0xf4, 0xf3, 0x77, 0x5b, 0x6c, 0xc0, 0x82, 0x27, 0xac, 0xb1, 0x35, 0x9d, 0xd4, + 0xd7, 0x49, 0x9e, 0x82, 0xcc, 0x32, 0xe2, 0xae, 0x39, 0x8c, 0xf6, 0x51, 0x3d, 0xa7, 0x8f, 0x99, + 0x83, 0x68, 0x27, 0x73, 0x9c, 0xf8, 0x17, 0x0b, 0x39, 0xda, 0x2f, 0x81, 0x0e, 0xf8, 0x63, 0xe8, + 0x66, 0x0f, 0xd5, 0xd9, 0x50, 0x0e, 0xbd, 0x62, 0xd9, 0xbb, 0xef, 0x77, 0xa2, 0x40, 0x3d, 0xf9, + 0x1d, 0x5d, 0x0c, 0x0e, 0x59, 0x40, 0x4c, 0x16, 0xba, 0xc4, 0x01, 0xaa, 0xaa, 0xb7, 0x69, 0x82, + 0xd8, 0xfc, 0x7f, 0x41, 0xa4, 0x4f, 0xbf, 0x7a, 0x34, 0x43, 0x47, 0xe6, 0xe8, 0xf1, 0x13, 0xb4, + 0x46, 0x19, 0x0b, 0x84, 0x7a, 0x3b, 0xdc, 0xd9, 0xda, 0x29, 0xed, 0xad, 0x5d, 0xff, 0xbc, 0x78, + 0x75, 0xaa, 0xa6, 0xed, 0x1e, 0x18, 0x8a, 0x3b, 0x4c, 0x44, 0x27, 0x8d, 0x4b, 0xda, 0xfd, 0x5a, + 0x4e, 0x43, 0xf2, 0x9e, 0xb6, 0x3f, 0x43, 0x9b, 0xf3, 0x56, 0x78, 0x13, 0x95, 0x06, 0x70, 0xa2, + 0xda, 0xbe, 0x4d, 0xe4, 0x12, 0x5f, 0x46, 0xe5, 0x31, 0x1d, 0xc6, 0x90, 0xf4, 0x6a, 0x92, 0x6c, + 0x3e, 0x59, 0xbe, 0x65, 0xed, 0xfe, 0x61, 0x21, 0x5b, 0x39, 0xbf, 0xe7, 0x73, 0x81, 0xbf, 0x3f, + 0x35, 0x35, 0xdc, 0x62, 0x19, 0x93, 0xd6, 0x6a, 0x66, 0x6c, 0xea, 0x88, 0x2b, 0xa9, 0x24, 0x37, + 0x31, 0x1e, 0xa1, 0xb2, 0x2f, 0x60, 0xc4, 0x9d, 0x65, 0x95, 0x1e, 0xef, 0x9c, 0xe9, 0x69, 0xac, + 0xa7, 0x7d, 0xb8, 0x29, 0x59, 0x48, 0x42, 0xb6, 0xfb, 0x9b, 0x85, 0xaa, 0x77, 0xa3, 0x20, 0x0e, + 0x09, 0x24, 0xcd, 0x85, 0xe3, 0x77, 0x50, 0xb9, 0x27, 0x25, 0x49, 0x0a, 0x8c, 0x5d, 0x02, 0x4b, + 0x74, 0xb2, 0x59, 0x45, 0xa9, 0x85, 0x8a, 0x48, 0x37, 0xab, 0x8c, 0x86, 0x18, 0x3d, 0xbe, 0x29, + 0x5f, 0x6a, 0xb2, 0x39, 0xa4, 0x23, 0xe0, 0x4e, 0x49, 0x19, 0xe8, 0xf7, 0x97, 0x53, 0x90, 0x59, + 0xdc, 0xee, 0xd3, 0x12, 0xda, 0x98, 0x6b, 0x3d, 0x78, 0x1f, 0x55, 0x52, 0x90, 0x8e, 0x30, 0xcb, + 0x5a, 0xca, 0x45, 0x32, 0x84, 0xec, 0x93, 0x4c, 0x52, 0x85, 0xb4, 0xa3, 0xef, 0xcf, 0xf4, 0xc9, + 0xc3, 0x54, 0x41, 0x0c, 0x46, 0xce, 0x16, 0xb9, 0x51, 0x53, 0x36, 0x37, 0x5b, 0x24, 0x96, 0x28, + 0x0d, 0x6e, 0xa0, 0x52, 0xec, 0x77, 0xf5, 0xac, 0xbc, 0xaa, 0x01, 0xa5, 0x56, 0xd1, 0x39, 0x29, + 0x8d, 0xe5, 0xd4, 0xa3, 0xa1, 0xff, 0x18, 0x22, 0xee, 0x07, 0x4c, 0x0f, 0xca, 0x6c, 0xea, 0x1d, + 0x3c, 0x6c, 0x6a, 0x0d, 0xc9, 0xa1, 0xf0, 0x01, 0xda, 0x48, 0x8f, 0x95, 0x1a, 0x26, 0xe3, 0xf2, + 0x8a, 0x36, 0xdc, 0x20, 0xb3, 0x6a, 0x32, 0x8f, 0xc7, 0x1f, 0xa2, 0x35, 0x1e, 0xb7, 0xb3, 0xf4, + 0x25, 0xf3, 0x33, 0x7b, 0x26, 0x47, 0x46, 0x45, 0xf2, 0xb8, 0xdd, 0xbf, 0x96, 0xd1, 0x85, 0x87, + 0xc1, 0xd0, 0xef, 0x9c, 0xbc, 0x86, 0x9f, 0xd1, 0x37, 0xa8, 0x1c, 0xc5, 0x43, 0x48, 0xeb, 0xfc, + 0x46, 0xf1, 0x3a, 0x4f, 0x42, 0x24, 0xf1, 0x10, 0x4c, 0xd1, 0xca, 0x1d, 0x27, 0x09, 0x23, 0xbe, + 0x89, 0x50, 0x30, 0xf2, 0x85, 0xea, 0x46, 0x69, 0x11, 0x5e, 0x51, 0x81, 0x64, 0x52, 0xf3, 0x3f, + 0xc9, 0x41, 0xf1, 0x5d, 0xb4, 0x25, 0x77, 0xf7, 0x29, 0xa3, 0x3d, 0xe8, 0x7e, 0xe9, 0xc3, 0xb0, + 0xcb, 0x55, 0x01, 0x54, 0x1a, 0x6f, 0x69, 0x4f, 
0x5b, 0x0f, 0xe6, 0x01, 0xe4, 0xb4, 0xcd, 0xee, + 0x9f, 0x16, 0x42, 0x49, 0x98, 0xaf, 0xa1, 0x63, 0xb4, 0x66, 0x3b, 0xc6, 0xd5, 0xf3, 0x66, 0x72, + 0x41, 0xcb, 0x78, 0xba, 0x92, 0x9e, 0x41, 0x26, 0xd7, 0xfc, 0x64, 0xad, 0x22, 0x3f, 0xd9, 0x3a, + 0x2a, 0xcb, 0x3f, 0x49, 0xda, 0x33, 0x6c, 0x89, 0x94, 0xff, 0x15, 0x4e, 0x12, 0x39, 0x76, 0x11, + 0x92, 0x0b, 0xd5, 0x6c, 0xd2, 0x3b, 0xaa, 0xca, 0x3b, 0x6a, 0x65, 0x52, 0x92, 0x43, 0x48, 0x42, + 0xf9, 0xe3, 0x93, 0xd7, 0x91, 0x11, 0xca, 0x8f, 0x20, 0x27, 0x89, 0x1c, 0xfb, 0xf9, 0x4e, 0x55, + 0x56, 0x99, 0xb8, 0x55, 0x3c, 0x13, 0xb3, 0xbd, 0xd1, 0xf4, 0x8e, 0x33, 0xfb, 0x9c, 0x8b, 0x50, + 0xd6, 0x48, 0xb8, 0x73, 0xc1, 0xc4, 0x9e, 0x75, 0x1a, 0x4e, 0x72, 0x08, 0xfc, 0x29, 0xda, 0x60, + 0x01, 0x4b, 0xa9, 0x5a, 0xe4, 0x1e, 0x77, 0x56, 0x95, 0xd1, 0x25, 0xf9, 0x9a, 0x0f, 0x67, 0x55, + 0x64, 0x1e, 0x3b, 0x57, 0xce, 0x95, 0xe2, 0xe5, 0xfc, 0xc5, 0x59, 0xe5, 0x6c, 0xab, 0x72, 0x7e, + 0xa3, 0x68, 0x29, 0x37, 0xdc, 0x67, 0x2f, 0x6b, 0x4b, 0xcf, 0x5f, 0xd6, 0x96, 0x5e, 0xbc, 0xac, + 0x2d, 0xfd, 0x3c, 0xad, 0x59, 0xcf, 0xa6, 0x35, 0xeb, 0xf9, 0xb4, 0x66, 0xbd, 0x98, 0xd6, 0xac, + 0xbf, 0xa7, 0x35, 0xeb, 0xd7, 0x7f, 0x6a, 0x4b, 0xdf, 0x56, 0xd2, 0x4c, 0xfe, 0x1b, 0x00, 0x00, + 0xff, 0xff, 0x3a, 0xc5, 0x5b, 0x91, 0xd7, 0x0e, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { @@ -716,6 +719,14 @@ func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -819,6 +830,16 @@ func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OmitManagedFields != nil { + i-- + if *m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -1054,6 +1075,7 @@ func (m *Policy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 return n } @@ -1124,6 +1146,9 @@ func (m *PolicyRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.OmitManagedFields != nil { + n += 2 + } return n } @@ -1227,6 +1252,7 @@ func (this *Policy) String() string { `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + repeatedStringForRules + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + fmt.Sprintf("%v", this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -1265,6 +1291,7 @@ func (this *PolicyRule) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + valueToStringGenerated(this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -2688,6 +2715,26 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + m.OmitManagedFields = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3113,6 +3160,27 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OmitManagedFields = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 9b5138ea5..5c457e381 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -195,6 +195,15 @@ message Policy { // be specified per rule in which case the union of both are omitted. // +optional repeated string omitStages = 3; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. + // +optional + optional bool omitManagedFields = 4; } // PolicyList is a list of audit Policies. @@ -250,5 +259,16 @@ message PolicyRule { // An empty list means no restrictions will apply. // +optional repeated string omitStages = 8; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fileds should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + optional bool omitManagedFields = 9; } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go index 2616f5fe7..f108b9bc5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go @@ -191,6 +191,15 @@ type Policy struct { // be specified per rule in which case the union of both are omitted. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. 
+ // +optional + OmitManagedFields bool `json:"omitManagedFields,omitempty" protobuf:"varint,4,opt,name=omitManagedFields"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -254,6 +263,17 @@ type PolicyRule struct { // An empty list means no restrictions will apply. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fileds should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + OmitManagedFields *bool `json:"omitManagedFields,omitempty" protobuf:"varint,9,opt,name=omitManagedFields"` } // GroupResources represents resource kinds in an API group. diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go index 74a184205..de9e938d4 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -262,6 +263,7 @@ func autoConvert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -274,6 +276,7 @@ func autoConvert_audit_Policy_To_v1alpha1_Policy(in *audit.Policy, out *Policy, out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -313,6 +316,7 @@ func autoConvert_v1alpha1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *au out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } @@ -330,6 +334,7 @@ func autoConvert_audit_PolicyRule_To_v1alpha1_PolicyRule(in *audit.PolicyRule, o out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go index efb6cd87d..5f99185ee 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -279,6 +280,11 @@ func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { *out = make([]Stage, len(*in)) copy(*out, *in) } + if in.OmitManagedFields != nil { + 
in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go index dd621a3ac..5070cb91b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.prerelease-lifecycle.go index 1fb335211..d1a38ca83 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go index 0437d71cf..ef04b4463 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -261,87 +261,90 @@ func init() { } var fileDescriptor_c7e4d52063960930 = []byte{ - // 1279 bytes of a gzipped FileDescriptorProto + // 1320 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xd6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x9d, 0x56, 0x74, 0x95, 0x83, 0x6d, 0x8c, 0x04, - 0x11, 0xa4, 0xbb, 0x4d, 0x28, 0x24, 0x42, 0x02, 0x64, 0xab, 0x15, 0x58, 0x4a, 0x43, 0x34, 0x8e, - 0x2b, 0x04, 0x1c, 0x58, 0xdb, 0x2f, 0xf6, 0x62, 0x7b, 0x77, 0xd9, 0x99, 0x35, 0xca, 0x8d, 0x2f, - 0x80, 0xc4, 0x9d, 0xcf, 0xc0, 0x85, 0x0f, 0x50, 0x71, 0xcc, 0xb1, 0xc7, 0x9e, 0x2c, 0x62, 0xbe, - 0x45, 0x24, 0x24, 0x34, 0x7f, 0x76, 0x67, 0xed, 0xd4, 0xc2, 0xe1, 0xd0, 0xdb, 0xce, 0x7b, 0xbf, - 0xf7, 0x9b, 0x37, 0xbf, 0x7d, 0xf3, 0xde, 0xa0, 0x93, 0xe1, 0x21, 0xb5, 0x5c, 0xdf, 0x1e, 0x46, - 0x1d, 0x08, 0x3d, 0x60, 0x40, 0xed, 0x09, 0x78, 0x3d, 0x3f, 0xb4, 0x95, 0xc3, 0x09, 0x5c, 0x0a, - 0xe1, 0x04, 0x42, 0x3b, 0x18, 0xf6, 0xc5, 0xca, 0x76, 0xa2, 0x9e, 0xcb, 0xec, 0xc9, 0x5e, 0x07, - 0x98, 0xb3, 0x67, 0xf7, 0xc1, 0x83, 0xd0, 0x61, 0xd0, 0xb3, 0x82, 0xd0, 0x67, 0x3e, 0x7e, 0x4f, - 0x06, 0x5a, 0x49, 0xa0, 0x15, 0x0c, 0xfb, 0x62, 0x65, 0x89, 0x40, 0x4b, 0x05, 0x6e, 0x3f, 0xec, - 0xbb, 0x6c, 0x10, 0x75, 0xac, 0xae, 0x3f, 0xb6, 0xfb, 0x7e, 0xdf, 0xb7, 0x45, 0x7c, 0x27, 0x3a, - 0x13, 0x2b, 0xb1, 0x10, 0x5f, 0x92, 0x77, 0x7b, 0x57, 0x27, 0x64, 0x3b, 0x11, 0x1b, 0x80, 0xc7, - 0xdc, 0xae, 0xc3, 0x5c, 0xdf, 0xb3, 0x27, 0xd7, 0xb2, 0xd8, 0x7e, 0xac, 0xd1, 0x63, 0xa7, 0x3b, - 0x70, 0x3d, 0x08, 0xcf, 0xf5, 0x09, 0xc6, 0xc0, 0x9c, 0xd7, 0x45, 0xd9, 0xcb, 0xa2, 0xc2, 0xc8, - 0x63, 0xee, 0x18, 0xae, 0x05, 0x7c, 0xfc, 0x5f, 0x01, 0xb4, 0x3b, 0x80, 0xb1, 0xb3, 0x18, 0x57, - 0xfb, 0xfd, 0x0e, 0xca, 0x3e, 0x9d, 0x80, 0xc7, 0xf0, 0xf7, 0x28, 0xc7, 0xb3, 0xe9, 0x39, 0xcc, - 0x31, 0x8d, 0xaa, 0xb1, 0x53, 0xd8, 0x7f, 0x64, 0x69, 0x05, 0x13, 0x52, 0x2d, 0x22, 0x47, 0x5b, - 0x93, 0x3d, 0xeb, 0xab, 0xce, 0x0f, 0xd0, 0x65, 0xcf, 0x80, 0x39, 0x0d, 0x7c, 0x31, 0xad, 0xac, - 0xcd, 0xa6, 0x15, 0xa4, 0x6d, 0x24, 0x61, 0xc5, 0xbb, 0x28, 0x3b, 0x82, 0x09, 0x8c, 0xcc, 0x5b, - 0x55, 0x63, 0x27, 0xdf, 0x78, 0x4b, 0x81, 0xb3, 0x47, 0xdc, 0x78, 0x15, 
0x7f, 0x10, 0x09, 0xc2, - 0xdf, 0xa2, 0x3c, 0x4f, 0x9c, 0x32, 0x67, 0x1c, 0x98, 0x19, 0x91, 0xd0, 0xfb, 0xab, 0x25, 0x74, - 0xea, 0x8e, 0xa1, 0x71, 0x57, 0xb1, 0xe7, 0x4f, 0x63, 0x12, 0xa2, 0xf9, 0xf0, 0x31, 0xda, 0x10, - 0x35, 0xd0, 0x7c, 0x62, 0xae, 0x8b, 0x64, 0x1e, 0x2b, 0xf8, 0x46, 0x5d, 0x9a, 0xaf, 0xa6, 0x95, - 0xb7, 0x97, 0x49, 0xca, 0xce, 0x03, 0xa0, 0x56, 0xbb, 0xf9, 0x84, 0xc4, 0x24, 0xfc, 0x68, 0x94, - 0x39, 0x7d, 0x30, 0xb3, 0xf3, 0x47, 0x6b, 0x71, 0xe3, 0x55, 0xfc, 0x41, 0x24, 0x08, 0xef, 0x23, - 0x14, 0xc2, 0x8f, 0x11, 0x50, 0xd6, 0x26, 0x4d, 0xf3, 0xb6, 0x08, 0x49, 0xa4, 0x23, 0x89, 0x87, - 0xa4, 0x50, 0xb8, 0x8a, 0xd6, 0x27, 0x10, 0x76, 0xcc, 0x0d, 0x81, 0xbe, 0xa3, 0xd0, 0xeb, 0xcf, - 0x21, 0xec, 0x10, 0xe1, 0xc1, 0x5f, 0xa2, 0xf5, 0x88, 0x42, 0x68, 0xe6, 0x84, 0x56, 0xef, 0xa6, - 0xb4, 0xb2, 0xe6, 0xcb, 0x94, 0x6b, 0xd4, 0xa6, 0x10, 0x36, 0xbd, 0x33, 0x5f, 0x33, 0x71, 0x0b, - 0x11, 0x0c, 0x78, 0x80, 0x4a, 0xee, 0x38, 0x80, 0x90, 0xfa, 0x1e, 0x2f, 0x15, 0xee, 0x31, 0xf3, - 0x37, 0x62, 0xbd, 0x3f, 0x9b, 0x56, 0x4a, 0xcd, 0x05, 0x0e, 0x72, 0x8d, 0x15, 0x7f, 0x80, 0xf2, - 0xd4, 0x8f, 0xc2, 0x2e, 0x34, 0x4f, 0xa8, 0x89, 0xaa, 0x99, 0x9d, 0x7c, 0x63, 0x93, 0xff, 0xb4, - 0x56, 0x6c, 0x24, 0xda, 0x8f, 0x6d, 0x94, 0xe7, 0xe9, 0xd5, 0xfb, 0xe0, 0x31, 0x13, 0x0b, 0x1d, - 0x92, 0xbf, 0xdc, 0x8e, 0x1d, 0x44, 0x63, 0x30, 0xa0, 0xbc, 0x2f, 0x0a, 0x91, 0xc0, 0x99, 0x59, - 0x10, 0x07, 0x38, 0xb4, 0x56, 0xec, 0x0a, 0xaa, 0xac, 0x09, 0x9c, 0x41, 0x08, 0x5e, 0x17, 0x64, - 0x5e, 0x89, 0x91, 0x68, 0x66, 0x3c, 0x40, 0xc5, 0x10, 0x68, 0xe0, 0x7b, 0x14, 0x5a, 0xcc, 0x61, - 0x11, 0x35, 0xef, 0x88, 0xbd, 0x76, 0x57, 0x2b, 0x57, 0x19, 0xd3, 0xc0, 0xb3, 0x69, 0xa5, 0x48, - 0xe6, 0x78, 0xc8, 0x02, 0x2f, 0x76, 0xd0, 0xa6, 0x2a, 0x09, 0x99, 0x88, 0xb9, 0x29, 0x36, 0xda, - 0x59, 0xba, 0x91, 0xba, 0xfd, 0x56, 0xdb, 0x1b, 0x7a, 0xfe, 0x4f, 0x5e, 0xe3, 0xee, 0x6c, 0x5a, - 0xd9, 0x24, 0x69, 0x0a, 0x32, 0xcf, 0x88, 0x7b, 0xfa, 0x30, 0x6a, 0x8f, 0xe2, 0x0d, 0xf7, 0x98, - 0x3b, 0x88, 0xda, 0x64, 0x81, 0x13, 0xff, 0x62, 0x20, 0x53, 0xed, 0x4b, 0xa0, 0x0b, 0xee, 0x04, - 0x7a, 0xc9, 0x3d, 0x35, 0xb7, 0xc4, 0x86, 0xf6, 0x6a, 0xea, 0x3d, 0x73, 0xbb, 0xa1, 0x2f, 0x6e, - 0x7c, 0x55, 0xd5, 0x82, 0x49, 0x96, 0x10, 0x93, 0xa5, 0x5b, 0x62, 0x1f, 0x15, 0xc5, 0xd5, 0xd4, - 0x49, 0x94, 0xfe, 0x5f, 0x12, 0xf1, 0xcd, 0x2f, 0xb6, 0xe6, 0xe8, 0xc8, 0x02, 0x3d, 0x9e, 0xa0, - 0x82, 0xe3, 0x79, 0x3e, 0x13, 0x57, 0x87, 0x9a, 0x77, 0xab, 0x99, 0x9d, 0xc2, 0xfe, 0xe7, 0x2b, - 0x17, 0xa7, 0x68, 0xd9, 0x56, 0x5d, 0x33, 0x3c, 0xf5, 0x58, 0x78, 0xde, 0xb8, 0xa7, 0x76, 0x2f, - 0xa4, 0x3c, 0x24, 0xbd, 0xd1, 0xf6, 0x67, 0xa8, 0xb4, 0x18, 0x85, 0x4b, 0x28, 0x33, 0x84, 0x73, - 0xd1, 0xf4, 0xf3, 0x84, 0x7f, 0xe2, 0xfb, 0x28, 0x3b, 0x71, 0x46, 0x11, 0xc8, 0x4e, 0x4d, 0xe4, - 0xe2, 0x93, 0x5b, 0x87, 0x46, 0xed, 0x85, 0x81, 0xf2, 0x62, 0xf3, 0x23, 0x97, 0x32, 0xfc, 0xdd, - 0xb5, 0x99, 0x61, 0xad, 0x26, 0x18, 0x8f, 0x16, 0x13, 0xa3, 0xa4, 0x32, 0xce, 0xc5, 0x96, 0xd4, - 0xbc, 0x68, 0xa1, 0xac, 0xcb, 0x60, 0x4c, 0xcd, 0x5b, 0x42, 0x1d, 0xeb, 0x66, 0xea, 0x34, 0x36, - 0xe3, 0x26, 0xdc, 0xe4, 0x24, 0x44, 0x72, 0xd5, 0x7e, 0x33, 0x50, 0xf1, 0x8b, 0xd0, 0x8f, 0x02, - 0x02, 0xb2, 0xb3, 0x50, 0xfc, 0x0e, 0xca, 0xf6, 0xb9, 0x45, 0x2a, 0xa0, 0xe3, 0x24, 0x4c, 0xfa, - 0x78, 0xa7, 0x0a, 0xe3, 0x08, 0x91, 0x90, 0xea, 0x54, 0x09, 0x0d, 0xd1, 0x7e, 0x7c, 0xc0, 0xef, - 0xa9, 0x5c, 0x1c, 0x3b, 0x63, 0xa0, 0x66, 0x46, 0x04, 0xa8, 0xdb, 0x97, 0x72, 0x90, 0x79, 0x5c, - 0xed, 0x8f, 0x0c, 0xda, 0x5a, 0x68, 0x3c, 0x78, 0x17, 0xe5, 0x62, 0x90, 0xca, 0x30, 0x11, 0x2d, - 
0xe6, 0x22, 0x09, 0x82, 0x37, 0x49, 0x8f, 0x53, 0x05, 0x4e, 0x57, 0xfd, 0x3e, 0xdd, 0x24, 0x8f, - 0x63, 0x07, 0xd1, 0x18, 0x3e, 0x58, 0xf8, 0x42, 0x8c, 0xd8, 0xd4, 0x60, 0xe1, 0x58, 0x22, 0x3c, - 0xb8, 0x81, 0x32, 0x91, 0xdb, 0x53, 0x83, 0xf2, 0x91, 0x02, 0x64, 0xda, 0xab, 0x0e, 0x49, 0x1e, - 0xcc, 0x0f, 0xe1, 0x04, 0xae, 0x50, 0x54, 0xcd, 0xc8, 0xe4, 0x10, 0xf5, 0x93, 0xa6, 0x54, 0x3a, - 0x41, 0xf0, 0x01, 0xe9, 0x04, 0xee, 0x73, 0x08, 0xa9, 0xeb, 0x7b, 0x8b, 0x03, 0xb2, 0x7e, 0xd2, - 0x54, 0x1e, 0x92, 0x42, 0xe1, 0x3a, 0xda, 0x8a, 0x45, 0x88, 0x03, 0xe5, 0xac, 0x7c, 0xa0, 0x02, - 0xb7, 0xc8, 0xbc, 0x9b, 0x2c, 0xe2, 0xf1, 0x47, 0xa8, 0x40, 0xa3, 0x4e, 0x22, 0x76, 0x4e, 0x84, - 0x27, 0x77, 0xaa, 0xa5, 0x5d, 0x24, 0x8d, 0xab, 0xfd, 0x63, 0xa0, 0xdb, 0x27, 0xfe, 0xc8, 0xed, - 0x9e, 0xbf, 0x81, 0x47, 0xd4, 0xd7, 0x28, 0x1b, 0x46, 0x23, 0x88, 0x2f, 0xc5, 0x87, 0x2b, 0x5f, - 0x0a, 0x99, 0x21, 0x89, 0x46, 0xa0, 0x2b, 0x9c, 0xaf, 0x28, 0x91, 0x84, 0xf8, 0x00, 0x21, 0x7f, - 0xec, 0x32, 0xd1, 0xb8, 0xe2, 0x8a, 0x7d, 0x20, 0xf2, 0x48, 0xac, 0xfa, 0x25, 0x93, 0x82, 0xd6, - 0xfe, 0x34, 0x10, 0x92, 0xec, 0x6f, 0xa0, 0x29, 0x9c, 0xce, 0x37, 0x05, 0xfb, 0x86, 0xe7, 0x5f, - 0xd2, 0x15, 0x5e, 0x64, 0xe2, 0x23, 0x70, 0x49, 0xf4, 0x4b, 0xd5, 0x58, 0xe5, 0xa5, 0x5a, 0x41, - 0x59, 0xfe, 0xe6, 0x88, 0xdb, 0x42, 0x9e, 0x23, 0xf9, 0x7b, 0x84, 0x12, 0x69, 0xc7, 0x16, 0x42, - 0xfc, 0x43, 0xd4, 0x76, 0xac, 0x6c, 0x91, 0x2b, 0xdb, 0x4e, 0xac, 0x24, 0x85, 0xe0, 0x84, 0xfc, - 0x45, 0x47, 0xcd, 0x75, 0x4d, 0xc8, 0x1f, 0x7a, 0x94, 0x48, 0x3b, 0x1e, 0xa4, 0x9b, 0x51, 0x56, - 0x08, 0x71, 0xb0, 0xb2, 0x10, 0xf3, 0xdd, 0x4f, 0x77, 0x87, 0xd7, 0x76, 0x32, 0x0b, 0xa1, 0xa4, - 0x55, 0x50, 0xf3, 0xb6, 0x4e, 0x3d, 0xe9, 0x25, 0x94, 0xa4, 0x10, 0xf8, 0x53, 0xb4, 0xe5, 0xf9, - 0x5e, 0x4c, 0xd5, 0x26, 0x47, 0xd4, 0xdc, 0x10, 0x41, 0xf7, 0xf8, 0x0d, 0x3c, 0x9e, 0x77, 0x91, - 0x45, 0xec, 0x42, 0x0d, 0xe6, 0x56, 0xae, 0xc1, 0xc6, 0xc3, 0x8b, 0xcb, 0xf2, 0xda, 0xcb, 0xcb, - 0xf2, 0xda, 0xab, 0xcb, 0xf2, 0xda, 0xcf, 0xb3, 0xb2, 0x71, 0x31, 0x2b, 0x1b, 0x2f, 0x67, 0x65, - 0xe3, 0xd5, 0xac, 0x6c, 0xfc, 0x35, 0x2b, 0x1b, 0xbf, 0xfe, 0x5d, 0x5e, 0xfb, 0x66, 0x43, 0x69, - 0xf0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xae, 0x4a, 0x9d, 0x6e, 0x0e, 0x00, 0x00, + 0x14, 0xcf, 0xc6, 0x71, 0xe3, 0x9d, 0x34, 0x4e, 0x32, 0x2d, 0x74, 0xc9, 0xc1, 0x0e, 0x41, 0x82, + 0x08, 0xd2, 0xdd, 0xb6, 0x14, 0x5a, 0x21, 0x01, 0x8a, 0x69, 0x29, 0x96, 0xda, 0x34, 0x9a, 0xd4, + 0x15, 0x02, 0x0e, 0x8c, 0xed, 0x17, 0x7b, 0x89, 0x3d, 0xbb, 0xec, 0xcc, 0x1a, 0xe5, 0x86, 0xc4, + 0x19, 0x89, 0x3b, 0x9f, 0x81, 0x0b, 0x1f, 0x00, 0x71, 0x42, 0x3d, 0xf6, 0xd8, 0x53, 0x44, 0xcd, + 0xb7, 0xe8, 0x09, 0xcd, 0xec, 0xec, 0xce, 0x7a, 0x53, 0x8b, 0x0d, 0x87, 0xde, 0x76, 0xde, 0xfb, + 0xbd, 0xdf, 0x7b, 0xf3, 0xf6, 0xfd, 0x19, 0x74, 0x70, 0x7c, 0x9b, 0xbb, 0x7e, 0xe0, 0x1d, 0xc7, + 0x5d, 0x88, 0x18, 0x08, 0xe0, 0xde, 0x04, 0x58, 0x3f, 0x88, 0x3c, 0xad, 0xa0, 0xa1, 0xcf, 0x21, + 0x9a, 0x40, 0xe4, 0x85, 0xc7, 0x03, 0x75, 0xf2, 0x68, 0xdc, 0xf7, 0x85, 0x37, 0xb9, 0xde, 0x05, + 0x41, 0xaf, 0x7b, 0x03, 0x60, 0x10, 0x51, 0x01, 0x7d, 0x37, 0x8c, 0x02, 0x11, 0xe0, 0x77, 0x12, + 0x43, 0x37, 0x33, 0x74, 0xc3, 0xe3, 0x81, 0x3a, 0xb9, 0xca, 0xd0, 0xd5, 0x86, 0x9b, 0x57, 0x07, + 0xbe, 0x18, 0xc6, 0x5d, 0xb7, 0x17, 0x8c, 0xbd, 0x41, 0x30, 0x08, 0x3c, 0x65, 0xdf, 0x8d, 0x8f, + 0xd4, 0x49, 0x1d, 0xd4, 0x57, 0xc2, 0xbb, 0xb9, 0x6b, 0x02, 0xf2, 0x68, 0x2c, 0x86, 0xc0, 0x84, + 0xdf, 0xa3, 0xc2, 0x0f, 0x98, 0x37, 0x39, 0x13, 0xc5, 0xe6, 0x4d, 0x83, 0x1e, 0xd3, 0xde, 0xd0, + 0x67, 0x10, 0x9d, 0x98, 0x1b, 
0x8c, 0x41, 0xd0, 0x97, 0x59, 0x79, 0xf3, 0xac, 0xa2, 0x98, 0x09, + 0x7f, 0x0c, 0x67, 0x0c, 0x3e, 0xfc, 0x2f, 0x03, 0xde, 0x1b, 0xc2, 0x98, 0x16, 0xed, 0xb6, 0x7f, + 0xbb, 0x88, 0xaa, 0x77, 0x27, 0xc0, 0x04, 0xfe, 0x16, 0xd5, 0x64, 0x34, 0x7d, 0x2a, 0xa8, 0x63, + 0x6d, 0x59, 0x3b, 0x2b, 0x37, 0xae, 0xb9, 0x26, 0x83, 0x19, 0xa9, 0x49, 0xa2, 0x44, 0xbb, 0x93, + 0xeb, 0xee, 0xc3, 0xee, 0x77, 0xd0, 0x13, 0x0f, 0x40, 0xd0, 0x16, 0x7e, 0x72, 0xda, 0x5c, 0x98, + 0x9e, 0x36, 0x91, 0x91, 0x91, 0x8c, 0x15, 0xef, 0xa2, 0xea, 0x08, 0x26, 0x30, 0x72, 0x16, 0xb7, + 0xac, 0x1d, 0xbb, 0xf5, 0xba, 0x06, 0x57, 0xef, 0x4b, 0xe1, 0x8b, 0xf4, 0x83, 0x24, 0x20, 0xfc, + 0x35, 0xb2, 0x65, 0xe0, 0x5c, 0xd0, 0x71, 0xe8, 0x54, 0x54, 0x40, 0xef, 0x96, 0x0b, 0xe8, 0x91, + 0x3f, 0x86, 0xd6, 0x86, 0x66, 0xb7, 0x1f, 0xa5, 0x24, 0xc4, 0xf0, 0xe1, 0x7d, 0xb4, 0xac, 0x6a, + 0xa0, 0x7d, 0xc7, 0x59, 0x52, 0xc1, 0xdc, 0xd4, 0xf0, 0xe5, 0xbd, 0x44, 0xfc, 0xe2, 0xb4, 0xf9, + 0xe6, 0xbc, 0x94, 0x8a, 0x93, 0x10, 0xb8, 0xdb, 0x69, 0xdf, 0x21, 0x29, 0x89, 0xbc, 0x1a, 0x17, + 0x74, 0x00, 0x4e, 0x75, 0xf6, 0x6a, 0x87, 0x52, 0xf8, 0x22, 0xfd, 0x20, 0x09, 0x08, 0xdf, 0x40, + 0x28, 0x82, 0xef, 0x63, 0xe0, 0xa2, 0x43, 0xda, 0xce, 0x05, 0x65, 0x92, 0xa5, 0x8e, 0x64, 0x1a, + 0x92, 0x43, 0xe1, 0x2d, 0xb4, 0x34, 0x81, 0xa8, 0xeb, 0x2c, 0x2b, 0xf4, 0x45, 0x8d, 0x5e, 0x7a, + 0x0c, 0x51, 0x97, 0x28, 0x0d, 0xfe, 0x02, 0x2d, 0xc5, 0x1c, 0x22, 0xa7, 0xa6, 0x72, 0xf5, 0x76, + 0x2e, 0x57, 0xee, 0x6c, 0x99, 0xca, 0x1c, 0x75, 0x38, 0x44, 0x6d, 0x76, 0x14, 0x18, 0x26, 0x29, + 0x21, 0x8a, 0x01, 0x0f, 0xd1, 0xba, 0x3f, 0x0e, 0x21, 0xe2, 0x01, 0x93, 0xa5, 0x22, 0x35, 0x8e, + 0x7d, 0x2e, 0xd6, 0xcb, 0xd3, 0xd3, 0xe6, 0x7a, 0xbb, 0xc0, 0x41, 0xce, 0xb0, 0xe2, 0xf7, 0x90, + 0xcd, 0x83, 0x38, 0xea, 0x41, 0xfb, 0x80, 0x3b, 0x68, 0xab, 0xb2, 0x63, 0xb7, 0x56, 0xe5, 0x4f, + 0x3b, 0x4c, 0x85, 0xc4, 0xe8, 0xb1, 0x87, 0x6c, 0x19, 0xde, 0xde, 0x00, 0x98, 0x70, 0xb0, 0xca, + 0x43, 0xf6, 0x97, 0x3b, 0xa9, 0x82, 0x18, 0x0c, 0x06, 0x64, 0x07, 0xaa, 0x10, 0x09, 0x1c, 0x39, + 0x2b, 0xea, 0x02, 0xb7, 0xdd, 0x92, 0x53, 0x41, 0x97, 0x35, 0x81, 0x23, 0x88, 0x80, 0xf5, 0x20, + 0x89, 0x2b, 0x13, 0x12, 0xc3, 0x8c, 0x87, 0xa8, 0x1e, 0x01, 0x0f, 0x03, 0xc6, 0xe1, 0x50, 0x50, + 0x11, 0x73, 0xe7, 0xa2, 0xf2, 0xb5, 0x5b, 0xae, 0x5c, 0x13, 0x9b, 0x16, 0x9e, 0x9e, 0x36, 0xeb, + 0x64, 0x86, 0x87, 0x14, 0x78, 0x31, 0x45, 0xab, 0xba, 0x24, 0x92, 0x40, 0x9c, 0x55, 0xe5, 0x68, + 0x67, 0xae, 0x23, 0xdd, 0xfd, 0x6e, 0x87, 0x1d, 0xb3, 0xe0, 0x07, 0xd6, 0xda, 0x98, 0x9e, 0x36, + 0x57, 0x49, 0x9e, 0x82, 0xcc, 0x32, 0xe2, 0xbe, 0xb9, 0x8c, 0xf6, 0x51, 0x3f, 0xa7, 0x8f, 0x99, + 0x8b, 0x68, 0x27, 0x05, 0x4e, 0xfc, 0xb3, 0x85, 0x1c, 0xed, 0x97, 0x40, 0x0f, 0xfc, 0x09, 0xf4, + 0xb3, 0x3e, 0x75, 0xd6, 0x94, 0x43, 0xaf, 0x5c, 0xf6, 0x1e, 0xf8, 0xbd, 0x28, 0x50, 0x1d, 0xbf, + 0xa5, 0x6b, 0xc1, 0x21, 0x73, 0x88, 0xc9, 0x5c, 0x97, 0x38, 0x40, 0x75, 0xd5, 0x9a, 0x26, 0x88, + 0xf5, 0xff, 0x17, 0x44, 0xda, 0xf9, 0xf5, 0xc3, 0x19, 0x3a, 0x52, 0xa0, 0xc7, 0x13, 0xb4, 0x42, + 0x19, 0x0b, 0x84, 0x6a, 0x1d, 0xee, 0x6c, 0x6c, 0x55, 0x76, 0x56, 0x6e, 0x7c, 0x5a, 0xba, 0x38, + 0xd5, 0xc8, 0x76, 0xf7, 0x0c, 0xc3, 0x5d, 0x26, 0xa2, 0x93, 0xd6, 0x25, 0xed, 0x7d, 0x25, 0xa7, + 0x21, 0x79, 0x47, 0x9b, 0x9f, 0xa0, 0xf5, 0xa2, 0x15, 0x5e, 0x47, 0x95, 0x63, 0x38, 0x51, 0x43, + 0xdf, 0x26, 0xf2, 0x13, 0x5f, 0x46, 0xd5, 0x09, 0x1d, 0xc5, 0x90, 0x4c, 0x6a, 0x92, 0x1c, 0x3e, + 0x5a, 0xbc, 0x6d, 0x6d, 0xff, 0x61, 0x21, 0x5b, 0x39, 0xbf, 0xef, 0x73, 0x81, 0xbf, 0x39, 0xb3, + 0x33, 0xdc, 0x72, 0x09, 0x93, 0xd6, 0x6a, 0x63, 0xac, 
0xeb, 0x88, 0x6b, 0xa9, 0x24, 0xb7, 0x2f, + 0x0e, 0x51, 0xd5, 0x17, 0x30, 0xe6, 0xce, 0xa2, 0xca, 0x8e, 0x7b, 0xbe, 0xec, 0xb4, 0x56, 0xd3, + 0x21, 0xdc, 0x96, 0x24, 0x24, 0xe1, 0xda, 0xfe, 0xd5, 0x42, 0xf5, 0x7b, 0x51, 0x10, 0x87, 0x04, + 0x92, 0xc9, 0xc2, 0xf1, 0x5b, 0xa8, 0x3a, 0x90, 0x92, 0x24, 0x03, 0xc6, 0x2e, 0x81, 0x25, 0x3a, + 0x39, 0xa9, 0xa2, 0xd4, 0x42, 0x05, 0xa4, 0x27, 0x55, 0x46, 0x43, 0x8c, 0x1e, 0xdf, 0x92, 0x7d, + 0x9a, 0x1c, 0xf6, 0xe9, 0x18, 0xb8, 0x53, 0x51, 0x06, 0xba, 0xfb, 0x72, 0x0a, 0x32, 0x8b, 0xdb, + 0xfe, 0xbd, 0x82, 0xd6, 0x0a, 0x83, 0x07, 0xef, 0xa2, 0x5a, 0x0a, 0xd2, 0x11, 0x66, 0x49, 0x4b, + 0xb9, 0x48, 0x86, 0x90, 0x43, 0x92, 0x49, 0xaa, 0x90, 0xf6, 0xf4, 0xef, 0x33, 0x43, 0x72, 0x3f, + 0x55, 0x10, 0x83, 0x91, 0x8b, 0x45, 0x1e, 0xd4, 0x8a, 0xcd, 0x2d, 0x16, 0x89, 0x25, 0x4a, 0x83, + 0x5b, 0xa8, 0x12, 0xfb, 0x7d, 0xbd, 0x28, 0xaf, 0x69, 0x40, 0xa5, 0x53, 0x76, 0x49, 0x4a, 0x63, + 0x79, 0x09, 0x1a, 0xfa, 0x2a, 0xa3, 0x7a, 0x47, 0x66, 0x97, 0xd8, 0x3b, 0x68, 0x27, 0x99, 0xce, + 0x10, 0x72, 0x41, 0xd2, 0xd0, 0x7f, 0x0c, 0x11, 0xf7, 0x03, 0x56, 0x5c, 0x90, 0x7b, 0x07, 0x6d, + 0xad, 0x21, 0x39, 0x14, 0xde, 0x43, 0x6b, 0x69, 0x12, 0x52, 0xc3, 0x64, 0x57, 0x5e, 0xd1, 0x86, + 0x6b, 0x64, 0x56, 0x4d, 0x8a, 0x78, 0xfc, 0x01, 0x5a, 0xe1, 0x71, 0x37, 0x4b, 0x76, 0x4d, 0x99, + 0x67, 0x3d, 0x75, 0x68, 0x54, 0x24, 0x8f, 0xdb, 0xfe, 0x6b, 0x11, 0x5d, 0x38, 0x08, 0x46, 0x7e, + 0xef, 0xe4, 0x15, 0x3c, 0xa2, 0xbe, 0x44, 0xd5, 0x28, 0x1e, 0x41, 0xda, 0x14, 0xef, 0x97, 0x6e, + 0x8a, 0x24, 0x42, 0x12, 0x8f, 0xc0, 0x54, 0xb8, 0x3c, 0x71, 0x92, 0x10, 0xe2, 0x5b, 0x08, 0x05, + 0x63, 0x5f, 0xa8, 0xc1, 0x95, 0x56, 0xec, 0x15, 0x15, 0x47, 0x26, 0x35, 0x2f, 0x99, 0x1c, 0x14, + 0xdf, 0x43, 0x1b, 0xf2, 0xf4, 0x80, 0x32, 0x3a, 0x80, 0xfe, 0xe7, 0x3e, 0x8c, 0xfa, 0x5c, 0x55, + 0x4b, 0xad, 0xf5, 0x86, 0xf6, 0xb4, 0xf1, 0xb0, 0x08, 0x20, 0x67, 0x6d, 0xb6, 0xff, 0xb4, 0x10, + 0x4a, 0xc2, 0x7c, 0x05, 0xd3, 0xe5, 0xd1, 0xec, 0x74, 0xf1, 0xce, 0x99, 0xc8, 0x39, 0xe3, 0xe5, + 0xa7, 0xa5, 0xf4, 0x0a, 0x32, 0xb7, 0xe6, 0xc9, 0x6b, 0x95, 0x79, 0xf2, 0x36, 0x51, 0x55, 0x3e, + 0x5e, 0xd2, 0xf9, 0x62, 0x4b, 0xa4, 0x7c, 0xd8, 0x70, 0x92, 0xc8, 0xb1, 0x8b, 0x90, 0xfc, 0x50, + 0x4d, 0x92, 0xfe, 0xa2, 0xba, 0xfc, 0x45, 0x9d, 0x4c, 0x4a, 0x72, 0x08, 0x49, 0x28, 0x9f, 0x86, + 0xf2, 0x6f, 0x64, 0x84, 0xf2, 0xc5, 0xc8, 0x49, 0x22, 0xc7, 0xc3, 0xfc, 0x54, 0xab, 0xaa, 0x44, + 0xdc, 0x2a, 0x9d, 0x88, 0xd9, 0x31, 0x6a, 0xc6, 0xcc, 0x4b, 0x47, 0xa2, 0x8b, 0x50, 0x36, 0x73, + 0xb8, 0x73, 0xc1, 0x84, 0x9e, 0x0d, 0x25, 0x4e, 0x72, 0x08, 0xfc, 0x31, 0x5a, 0x63, 0x01, 0x4b, + 0xa9, 0x3a, 0xe4, 0x3e, 0x77, 0x96, 0x95, 0xd1, 0x25, 0xd9, 0xca, 0xfb, 0xb3, 0x2a, 0x52, 0xc4, + 0x16, 0x8a, 0xb9, 0x56, 0xbe, 0x98, 0x3f, 0x7b, 0x59, 0x31, 0xdb, 0xaa, 0x98, 0x5f, 0x2b, 0x5b, + 0xc8, 0xad, 0xab, 0x4f, 0x9e, 0x37, 0x16, 0x9e, 0x3e, 0x6f, 0x2c, 0x3c, 0x7b, 0xde, 0x58, 0xf8, + 0x71, 0xda, 0xb0, 0x9e, 0x4c, 0x1b, 0xd6, 0xd3, 0x69, 0xc3, 0x7a, 0x36, 0x6d, 0x58, 0x7f, 0x4f, + 0x1b, 0xd6, 0x2f, 0xff, 0x34, 0x16, 0xbe, 0x5a, 0xd6, 0x89, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, + 0x21, 0x57, 0x33, 0x77, 0xfc, 0x0e, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { @@ -722,6 +725,14 @@ func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -825,6 +836,16 @@ func (m *PolicyRule) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OmitManagedFields != nil { + i-- + if *m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } if len(m.OmitStages) > 0 { for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.OmitStages[iNdEx]) @@ -1062,6 +1083,7 @@ func (m *Policy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 return n } @@ -1132,6 +1154,9 @@ func (m *PolicyRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.OmitManagedFields != nil { + n += 2 + } return n } @@ -1236,6 +1261,7 @@ func (this *Policy) String() string { `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + repeatedStringForRules + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + fmt.Sprintf("%v", this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -1274,6 +1300,7 @@ func (this *PolicyRule) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + valueToStringGenerated(this.OmitManagedFields) + `,`, `}`, }, "") return s @@ -2729,6 +2756,26 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OmitManagedFields = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3154,6 +3201,27 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OmitManagedFields = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index 0a200b538..e78039fdd 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -204,6 +204,15 @@ message Policy { // be specified per rule in which case the union of both are omitted. // +optional repeated string omitStages = 3; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fileds, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. 
+ // +optional + optional bool omitManagedFields = 4; } // PolicyList is a list of audit Policies. @@ -259,5 +268,16 @@ message PolicyRule { // An empty list means no restrictions will apply. // +optional repeated string omitStages = 8; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fields should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default. + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + optional bool omitManagedFields = 9; } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go index eb538bc17..fcca2bcb8 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go @@ -187,6 +187,15 @@ type Policy struct { // be specified per rule in which case the union of both are omitted. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fields, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule in which case the value specified + // in a rule will override the global default. + // +optional + OmitManagedFields bool `json:"omitManagedFields,omitempty" protobuf:"varint,4,opt,name=omitManagedFields"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -250,6 +259,17 @@ type PolicyRule struct { // An empty list means no restrictions will apply. // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fields should be included + // in the API audit log + // Note that the value, if specified, in this rule will override the global default. + // If a value is not specified then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + OmitManagedFields *bool `json:"omitManagedFields,omitempty" protobuf:"varint,9,opt,name=omitManagedFields"` } // GroupResources represents resource kinds in an API group.
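A minimal sketch (not part of the vendored change) of how the new omitManagedFields precedence plays out: Policy carries a plain bool as the global default, PolicyRule carries a *bool so an unset rule falls back to that default while a set value overrides it. The resolveOmitManagedFields helper and the main function below are illustrative only, using the v1beta1 types introduced above:

package main

import (
	"fmt"

	auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1"
)

// resolveOmitManagedFields mirrors the documented precedence: a nil rule-level
// value falls back to the policy-wide default, a non-nil value overrides it.
func resolveOmitManagedFields(p *auditv1beta1.Policy, r *auditv1beta1.PolicyRule) bool {
	if r.OmitManagedFields != nil {
		return *r.OmitManagedFields
	}
	return p.OmitManagedFields
}

func main() {
	keep := false // per-rule override: keep managedFields for requests matching rule 1

	p := auditv1beta1.Policy{
		OmitManagedFields: true, // global default: drop managedFields from audited bodies
		Rules: []auditv1beta1.PolicyRule{
			{Level: auditv1beta1.LevelRequestResponse},                           // rule 0: inherits the global default
			{Level: auditv1beta1.LevelRequestResponse, OmitManagedFields: &keep}, // rule 1: overrides it
		},
	}

	for i := range p.Rules {
		fmt.Printf("rule %d omits managedFields: %v\n", i, resolveOmitManagedFields(&p, &p.Rules[i]))
	}
}

The same fallback logic appears later in this diff as isOmitManagedFields in vendor/k8s.io/apiserver/pkg/audit/policy/checker.go.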
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go index 0aac32119..33bfc76e3 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -257,6 +258,7 @@ func autoConvert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -269,6 +271,7 @@ func autoConvert_audit_Policy_To_v1beta1_Policy(in *audit.Policy, out *Policy, s out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields return nil } @@ -308,6 +311,7 @@ func autoConvert_v1beta1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *aud out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } @@ -325,6 +329,7 @@ func autoConvert_audit_PolicyRule_To_v1beta1_PolicyRule(in *audit.PolicyRule, ou out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go index 5adbd5a78..ee7f8b90e 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -279,6 +280,11 @@ func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { *out = make([]Stage, len(*in)) copy(*out, *in) } + if in.OmitManagedFields != nil { + in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go index 73e63fc11..198b5be4a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.prerelease-lifecycle.go index e475d4c2e..0a0dc6669 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.prerelease-lifecycle.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go 
b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go index a210f631b..81d5add47 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -277,6 +278,11 @@ func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { *out = make([]Stage, len(*in)) copy(*out, *in) } + if in.OmitManagedFields != nil { + in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go index c7de6539d..ff2132f72 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go index dcb4e8552..3d2ac484b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go index 1c8db8d04..82fec0111 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go index dd66315ee..13e5cffca 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go index dbb932aa3..8ae15a0ec 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -19,7 +19,7 @@ package bootstrap import ( coordinationv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" diff --git a/vendor/k8s.io/apiserver/pkg/audit/context.go b/vendor/k8s.io/apiserver/pkg/audit/context.go index 3d616bbd4..eada7add9 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/context.go +++ b/vendor/k8s.io/apiserver/pkg/audit/context.go @@ -19,6 +19,7 @@ package audit import ( "context" + auditinternal "k8s.io/apiserver/pkg/apis/audit" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" ) @@ -27,7 +28,15 @@ type key int const ( // auditAnnotationsKey is the context key for the audit annotations. 
+ // TODO: it's wasteful to store the audit annotations under a separate key, we + // copy the request context twice for audit purposes. We should move the audit + // annotations under AuditContext so we can get rid of the additional request + // context copy. auditAnnotationsKey key = iota + + // auditKey is the context key for storing the audit event that is being + // captured and the evaluated policy that applies to the given request. + auditKey ) // annotations = *[]annotation instead of a map to preserve order of insertions @@ -59,7 +68,7 @@ func WithAuditAnnotations(parent context.Context) context.Context { // prefer AddAuditAnnotation over LogAnnotation to avoid dropping annotations. func AddAuditAnnotation(ctx context.Context, key, value string) { // use the audit event directly if we have it - if ae := genericapirequest.AuditEventFrom(ctx); ae != nil { + if ae := AuditEventFrom(ctx); ae != nil { LogAnnotation(ae, key, value) return } @@ -82,3 +91,26 @@ func auditAnnotationsFrom(ctx context.Context) []annotation { return *annotations } + +// WithAuditContext returns a new context that stores the pair of the audit +// configuration object that applies to the given request and +// the audit event that is going to be written to the API audit log. +func WithAuditContext(parent context.Context, ev *AuditContext) context.Context { + return genericapirequest.WithValue(parent, auditKey, ev) +} + +// AuditEventFrom returns the audit event struct on the ctx +func AuditEventFrom(ctx context.Context) *auditinternal.Event { + if o := AuditContextFrom(ctx); o != nil { + return o.Event + } + return nil +} + +// AuditContextFrom returns the pair of the audit configuration object +// that applies to the given request and the audit event that is going to +// be written to the API audit log. +func AuditContextFrom(ctx context.Context) *AuditContext { + ev, _ := ctx.Value(auditKey).(*AuditContext) + return ev +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/evaluator.go b/vendor/k8s.io/apiserver/pkg/audit/evaluator.go new file mode 100644 index 000000000..d5e40428c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/evaluator.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// AuditContext is a pair of the audit configuration object that applies to +// a given request and the audit Event object that is being captured. +// It's a convenient placeholder to store both these objects in the request context. +type AuditContext struct { + // RequestAuditConfig is the audit configuration that applies to the request + RequestAuditConfig RequestAuditConfig + + // Event is the audit Event object that is being captured to be written in + // the API audit log. It is set to nil when the request is not being audited. + Event *audit.Event +} + +// RequestAuditConfig is the evaluated audit configuration that is applicable to +// a given request. 
PolicyRuleEvaluator evaluates the audit policy against the +// authorizer attributes and returns a RequestAuditConfig that applies to the request. +type RequestAuditConfig struct { + // OmitStages is the stages that need to be omitted from being audited. + OmitStages []audit.Stage + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + OmitManagedFields bool +} + +// RequestAuditConfigWithLevel includes Level at which the request is being audited. +// PolicyRuleEvaluator evaluates the audit configuration for a request +// against the authorizer attributes and returns a RequestAuditConfigWithLevel +// that applies to the request. +type RequestAuditConfigWithLevel struct { + RequestAuditConfig + + // Level at which the request is being audited + Level audit.Level +} + +// PolicyRuleEvaluator exposes methods for evaluating the policy rules. +type PolicyRuleEvaluator interface { + // EvaluatePolicyRule evaluates the audit policy of the apiserver against + // the given authorizer attributes and returns the audit configuration that + // is applicable to the given request. + EvaluatePolicyRule(authorizer.Attributes) RequestAuditConfigWithLevel +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go index e9a901abf..6a98ff4ac 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go @@ -20,6 +20,7 @@ import ( "strings" "k8s.io/apiserver/pkg/apis/audit" + auditinternal "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authorization/authorizer" ) @@ -28,18 +29,12 @@ const ( DefaultAuditLevel = audit.LevelNone ) -// Checker exposes methods for checking the policy rules. -type Checker interface { - // Check the audit level for a request with the given authorizer attributes. - LevelAndStages(authorizer.Attributes) (audit.Level, []audit.Stage) -} - -// NewChecker creates a new policy checker. -func NewChecker(policy *audit.Policy) Checker { +// NewPolicyRuleEvaluator creates a new policy rule evaluator. +func NewPolicyRuleEvaluator(policy *audit.Policy) auditinternal.PolicyRuleEvaluator { for i, rule := range policy.Rules { policy.Rules[i].OmitStages = unionStages(policy.OmitStages, rule.OmitStages) } - return &policyChecker{*policy} + return &policyRuleEvaluator{*policy} } func unionStages(stageLists ...[]audit.Stage) []audit.Stage { @@ -56,22 +51,48 @@ func unionStages(stageLists ...[]audit.Stage) []audit.Stage { return result } -// FakeChecker creates a checker that returns a constant level for all requests (for testing). -func FakeChecker(level audit.Level, stage []audit.Stage) Checker { - return &fakeChecker{level, stage} +// NewFakePolicyRuleEvaluator creates a fake policy rule evaluator that returns +// a constant level for all requests (for testing).
+func NewFakePolicyRuleEvaluator(level audit.Level, stage []audit.Stage) auditinternal.PolicyRuleEvaluator { + return &fakePolicyRuleEvaluator{level, stage} } -type policyChecker struct { +type policyRuleEvaluator struct { audit.Policy } -func (p *policyChecker) LevelAndStages(attrs authorizer.Attributes) (audit.Level, []audit.Stage) { +func (p *policyRuleEvaluator) EvaluatePolicyRule(attrs authorizer.Attributes) auditinternal.RequestAuditConfigWithLevel { for _, rule := range p.Rules { if ruleMatches(&rule, attrs) { - return rule.Level, rule.OmitStages + return auditinternal.RequestAuditConfigWithLevel{ + Level: rule.Level, + RequestAuditConfig: auditinternal.RequestAuditConfig{ + OmitStages: rule.OmitStages, + OmitManagedFields: isOmitManagedFields(&rule, p.OmitManagedFields), + }, + } } } - return DefaultAuditLevel, p.OmitStages + + return auditinternal.RequestAuditConfigWithLevel{ + Level: DefaultAuditLevel, + RequestAuditConfig: auditinternal.RequestAuditConfig{ + OmitStages: p.OmitStages, + OmitManagedFields: p.OmitManagedFields, + }, + } +} + +// isOmitManagedFields returns whether to omit managed fields from the request +// and response bodies from being written to the API audit log. +// If a user specifies OmitManagedFields inside a policy rule, that overrides +// the global policy default in Policy.OmitManagedFields. +func isOmitManagedFields(policyRule *audit.PolicyRule, policyDefault bool) bool { + if policyRule.OmitManagedFields == nil { + return policyDefault + } + + return *policyRule.OmitManagedFields } // Check whether the rule matches the request attrs. @@ -209,11 +230,16 @@ func hasString(slice []string, value string) bool { return false } -type fakeChecker struct { +type fakePolicyRuleEvaluator struct { level audit.Level stage []audit.Stage } -func (f *fakeChecker) LevelAndStages(_ authorizer.Attributes) (audit.Level, []audit.Stage) { - return f.level, f.stage +func (f *fakePolicyRuleEvaluator) EvaluatePolicyRule(_ authorizer.Attributes) auditinternal.RequestAuditConfigWithLevel { + return auditinternal.RequestAuditConfigWithLevel{ + Level: f.level, + RequestAuditConfig: auditinternal.RequestAuditConfig{ + OmitStages: f.stage, + }, + } } diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go index 29ad4b72b..cdfd535f7 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -18,6 +18,7 @@ package audit import ( "bytes" + "context" "fmt" "net/http" "reflect" @@ -111,7 +112,8 @@ func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) { // LogRequestObject fills in the request object into an audit event. The passed runtime.Object // will be converted to the given gv. -func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, objGV schema.GroupVersion, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) { +func LogRequestObject(ctx context.Context, obj runtime.Object, objGV schema.GroupVersion, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) { + ae := AuditEventFrom(ctx) if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { return } @@ -151,6 +153,16 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, objGV schema. 
return } + if shouldOmitManagedFields(ctx) { + copy, ok, err := copyWithoutManagedFields(obj) + if err != nil { + klog.Warningf("error while dropping managed fields from the request for %q error: %v", reflect.TypeOf(obj).Name(), err) + } + if ok { + obj = copy + } + } + // TODO(audit): hook into the serializer to avoid double conversion var err error ae.RequestObject, err = encodeObject(obj, objGV, s) @@ -162,7 +174,8 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, objGV schema. } // LogRequestPatch fills in the given patch as the request object into an audit event. -func LogRequestPatch(ae *auditinternal.Event, patch []byte) { +func LogRequestPatch(ctx context.Context, patch []byte) { + ae := AuditEventFrom(ctx) if ae == nil || ae.Level.Less(auditinternal.LevelRequest) { return } @@ -175,7 +188,8 @@ func LogRequestPatch(ae *auditinternal.Event, patch []byte) { // LogResponseObject fills in the response object into an audit event. The passed runtime.Object // will be converted to the given gv. -func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { +func LogResponseObject(ctx context.Context, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { + ae := AuditEventFrom(ctx) if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { return } @@ -191,6 +205,17 @@ func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.Gr if ae.Level.Less(auditinternal.LevelRequestResponse) { return } + + if shouldOmitManagedFields(ctx) { + copy, ok, err := copyWithoutManagedFields(obj) + if err != nil { + klog.Warningf("error while dropping managed fields from the response for %q error: %v", reflect.TypeOf(obj).Name(), err) + } + if ok { + obj = copy + } + } + // TODO(audit): hook into the serializer to avoid double conversion var err error ae.ResponseObject, err = encodeObject(obj, gv, s) @@ -242,3 +267,72 @@ func maybeTruncateUserAgent(req *http.Request) string { return ua } + +// copyWithoutManagedFields will make a deep copy of the specified object and +// will discard the managed fields from the copy. +// The specified object is expected to be a meta.Object or a "list". +// The specified object obj is treated as readonly and hence not mutated. +// On return, an error is set if the function runs into any error while +// removing the managed fields, the boolean value is true if the copy has +// been made successfully, otherwise false. +func copyWithoutManagedFields(obj runtime.Object) (runtime.Object, bool, error) { + isAccessor := true + if _, err := meta.Accessor(obj); err != nil { + isAccessor = false + } + isList := meta.IsListType(obj) + _, isTable := obj.(*metav1.Table) + if !isAccessor && !isList && !isTable { + return nil, false, nil + } + + // TODO a deep copy isn't really needed here, figure out how we can reliably + // use shallow copy here to omit the managedFields.
+ copy := obj.DeepCopyObject() + + if isAccessor { + if err := removeManagedFields(copy); err != nil { + return nil, false, err + } + } + + if isList { + if err := meta.EachListItem(copy, removeManagedFields); err != nil { + return nil, false, err + } + } + + if isTable { + table := copy.(*metav1.Table) + for i := range table.Rows { + rowObj := table.Rows[i].Object + if err := removeManagedFields(rowObj.Object); err != nil { + return nil, false, err + } + } + } + + return copy, true, nil +} + +func removeManagedFields(obj runtime.Object) error { + if obj == nil { + return nil + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetManagedFields(nil) + return nil +} + +func shouldOmitManagedFields(ctx context.Context) bool { + if auditContext := AuditContextFrom(ctx); auditContext != nil { + return auditContext.RequestAuditConfig.OmitManagedFields + } + + // If we can't decide, return false to maintain current behavior which is + // to retain the managed fields in the audit. + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go index 8e0520af1..a2c5ce680 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go @@ -20,7 +20,7 @@ import ( "time" utilcache "k8s.io/apimachinery/pkg/util/cache" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) type simpleCache struct { diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go index b0d06a231..787d926b4 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -33,12 +33,11 @@ import ( "golang.org/x/sync/singleflight" apierrors "k8s.io/apimachinery/pkg/api/errors" - utilclock "k8s.io/apimachinery/pkg/util/clock" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) var errAuthnCrash = apierrors.NewInternalError(errors.New("authentication failed unexpectedly")) @@ -89,10 +88,10 @@ type cache interface { // New returns a token authenticator that caches the results of the specified authenticator. A ttl of 0 bypasses the cache.
func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration) authenticator.Token { - return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, utilclock.RealClock{}) + return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, clock.RealClock{}) } -func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock utilclock.Clock) authenticator.Token { +func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock clock.Clock) authenticator.Token { randomCacheKey := make([]byte, 32) if _, err := rand.Read(randomCacheKey); err != nil { panic(err) // rand should never fail @@ -189,7 +188,7 @@ func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, toke // since this is shared work between multiple requests, we have no way of knowing if any // particular request supports audit annotations. thus we always attempt to record them. ev := &auditinternal.Event{Level: auditinternal.LevelMetadata} - ctx = request.WithAuditEvent(ctx, ev) + ctx = audit.WithAuditContext(ctx, &audit.AuditContext{Event: ev}) record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token) record.annotations = ev.Annotations diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go index 07880e5e2..40e32a1a1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go @@ -18,14 +18,15 @@ package filterlatency import ( "context" + "fmt" "net/http" "time" - utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) type requestFilterRecordKeyType int @@ -54,22 +55,22 @@ func requestFilterRecordFrom(ctx context.Context) *requestFilterRecord { // TrackStarted measures the timestamp the given handler has started execution // by attaching a handler to the chain. func TrackStarted(handler http.Handler, name string) http.Handler { - return trackStarted(handler, name, utilclock.RealClock{}) + return trackStarted(handler, name, clock.RealClock{}) } // TrackCompleted measures the timestamp the given handler has completed execution and then // it updates the corresponding metric with the filter latency duration. 
func TrackCompleted(handler http.Handler) http.Handler { - return trackCompleted(handler, utilclock.RealClock{}, func(ctx context.Context, fr *requestFilterRecord, completedAt time.Time) { + return trackCompleted(handler, clock.RealClock{}, func(ctx context.Context, fr *requestFilterRecord, completedAt time.Time) { latency := completedAt.Sub(fr.startedTimestamp) metrics.RecordFilterLatency(ctx, fr.name, latency) if klog.V(3).Enabled() && latency > minFilterLatencyToLog { - httplog.AddInfof(ctx, "%s=%s", fr.name, latency.String()) + httplog.AddKeyValue(ctx, fmt.Sprintf("fl_%s", fr.name), latency.String()) } }) } -func trackStarted(handler http.Handler, name string, clock utilclock.PassiveClock) http.Handler { +func trackStarted(handler http.Handler, name string, clock clock.PassiveClock) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if fr := requestFilterRecordFrom(ctx); fr != nil { @@ -89,7 +90,7 @@ func trackStarted(handler http.Handler, name string, clock utilclock.PassiveCloc }) } -func trackCompleted(handler http.Handler, clock utilclock.PassiveClock, action func(context.Context, *requestFilterRecord, time.Time)) http.Handler { +func trackCompleted(handler http.Handler, clock clock.PassiveClock, action func(context.Context, *requestFilterRecord, time.Time)) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // The previous filter has just completed. completedAt := clock.Now() diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go index 02f57909e..5a97c7d47 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -30,32 +30,38 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/endpoints/responsewriter" ) // WithAudit decorates a http.Handler with audit logging information for all the // requests coming to the server. Audit level is decided according to requests' // attributes and audit policy. Logs are emitted to the audit sink to // process events. If sink or audit policy is nil, no decoration takes place. 
-func WithAudit(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunningCheck request.LongRunningRequestCheck) http.Handler { +func WithAudit(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunningCheck request.LongRunningRequestCheck) http.Handler { if sink == nil || policy == nil { return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + auditContext, err := evaluatePolicyAndCreateAuditEvent(req, policy) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) return } - ctx := req.Context() - if ev == nil || ctx == nil { + + ev := auditContext.Event + if ev == nil || req.Context() == nil { handler.ServeHTTP(w, req) return } + req = req.WithContext(audit.WithAuditContext(req.Context(), auditContext)) + + ctx := req.Context() + omitStages := auditContext.RequestAuditConfig.OmitStages + ev.Stage = auditinternal.StageRequestReceived if processed := processAuditEvent(ctx, sink, ev, omitStages); !processed { audit.ApiserverAuditDroppedCounter.WithContext(ctx).Inc() @@ -112,38 +118,40 @@ func WithAudit(handler http.Handler, sink audit.Sink, policy policy.Checker, lon }) } -// createAuditEventAndAttachToContext is responsible for creating the audit event -// and attaching it to the appropriate request context. It returns: -// - context with audit event attached to it -// - created audit event +// evaluatePolicyAndCreateAuditEvent is responsible for evaluating the audit +// policy configuration applicable to the request and create a new audit +// event that will be written to the API audit log. // - error if anything bad happened -func createAuditEventAndAttachToContext(req *http.Request, policy policy.Checker) (*http.Request, *auditinternal.Event, []auditinternal.Stage, error) { +func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRuleEvaluator) (*audit.AuditContext, error) { ctx := req.Context() attribs, err := GetAuthorizerAttributes(ctx) if err != nil { - return req, nil, nil, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err) + return nil, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err) } - level, omitStages := policy.LevelAndStages(attribs) - audit.ObservePolicyLevel(ctx, level) - if level == auditinternal.LevelNone { + ls := policy.EvaluatePolicyRule(attribs) + audit.ObservePolicyLevel(ctx, ls.Level) + if ls.Level == auditinternal.LevelNone { // Don't audit. 
- return req, nil, nil, nil + return &audit.AuditContext{ + RequestAuditConfig: ls.RequestAuditConfig, + }, nil } requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) if !ok { requestReceivedTimestamp = time.Now() } - ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, level, attribs) + ev, err := audit.NewEventFromRequest(req, requestReceivedTimestamp, ls.Level, attribs) if err != nil { - return req, nil, nil, fmt.Errorf("failed to complete audit event from request: %v", err) + return nil, fmt.Errorf("failed to complete audit event from request: %v", err) } - req = req.WithContext(request.WithAuditEvent(ctx, ev)) - - return req, ev, omitStages, nil + return &audit.AuditContext{ + RequestAuditConfig: ls.RequestAuditConfig, + Event: ev, + }, nil } func processAuditEvent(ctx context.Context, sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool { @@ -171,19 +179,11 @@ func decorateResponseWriter(ctx context.Context, responseWriter http.ResponseWri omitStages: omitStages, } - // check if the ResponseWriter we're wrapping is the fancy one we need - // or if the basic is sufficient - //lint:file-ignore SA1019 Keep supporting deprecated http.CloseNotifier - _, cn := responseWriter.(http.CloseNotifier) - _, fl := responseWriter.(http.Flusher) - _, hj := responseWriter.(http.Hijacker) - if cn && fl && hj { - return &fancyResponseWriterDelegator{delegate} - } - return delegate + return responsewriter.WrapForHTTP1Or2(delegate) } var _ http.ResponseWriter = &auditResponseWriter{} +var _ responsewriter.UserProvidedDecorator = &auditResponseWriter{} // auditResponseWriter intercepts WriteHeader, sets it in the event. If the sink is set, it will // create immediately an event (for long running requests). @@ -196,6 +196,10 @@ type auditResponseWriter struct { omitStages []auditinternal.Stage } +func (a *auditResponseWriter) Unwrap() http.ResponseWriter { + return a.ResponseWriter +} + func (a *auditResponseWriter) processCode(code int) { a.once.Do(func() { if a.event.ResponseStatus == nil { @@ -221,28 +225,11 @@ func (a *auditResponseWriter) WriteHeader(code int) { a.ResponseWriter.WriteHeader(code) } -// fancyResponseWriterDelegator implements http.CloseNotifier, http.Flusher and -// http.Hijacker which are needed to make certain http operation (e.g. watch, rsh, etc) -// working. -type fancyResponseWriterDelegator struct { - *auditResponseWriter -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { +func (a *auditResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { // fake a response status before protocol switch happens - f.processCode(http.StatusSwitchingProtocols) + a.processCode(http.StatusSwitchingProtocols) - return f.ResponseWriter.(http.Hijacker).Hijack() + // the outer ResponseWriter object returned by WrapForHTTP1Or2 implements + // http.Hijacker if the inner object (a.ResponseWriter) implements http.Hijacker. 
+ return a.ResponseWriter.(http.Hijacker).Hijack() } - -var _ http.CloseNotifier = &fancyResponseWriterDelegator{} -var _ http.Flusher = &fancyResponseWriterDelegator{} -var _ http.Hijacker = &fancyResponseWriterDelegator{} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go index 22b276991..6a885711f 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_annotations.go @@ -20,14 +20,13 @@ import ( "net/http" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/audit/policy" ) // WithAuditAnnotations decorates a http.Handler with a []{key, value} that is merged // with the audit.Event.Annotations map. This allows layers that run before WithAudit // (such as authentication) to assert annotations. // If sink or audit policy is nil, no decoration takes place. -func WithAuditAnnotations(handler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { +func WithAuditAnnotations(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator) http.Handler { // no need to wrap if auditing is disabled if sink == nil || policy == nil { return handler diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go index 2de13f747..fc9340b10 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go @@ -26,33 +26,36 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" ) // WithFailedAuthenticationAudit decorates a failed http.Handler used in WithAuthentication handler. // It is meant to log only failed authentication requests. 
-func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, policy policy.Checker) http.Handler { +func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator) http.Handler { if sink == nil || policy == nil { return failedHandler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + a, err := evaluatePolicyAndCreateAuditEvent(req, policy) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) return } + + ev := a.Event if ev == nil { failedHandler.ServeHTTP(w, req) return } + req = req.WithContext(audit.WithAuditContext(req.Context(), a)) + ev.ResponseStatus = &metav1.Status{} ev.ResponseStatus.Message = getAuthMethods(req) ev.Stage = auditinternal.StageResponseStarted - rw := decorateResponseWriter(req.Context(), w, ev, sink, omitStages) + rw := decorateResponseWriter(req.Context(), w, ev, sink, a.RequestAuditConfig.OmitStages) failedHandler.ServeHTTP(rw, req) }) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index 968349381..395d9332a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -49,7 +49,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime. } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) attributes, err := GetAuthorizerAttributes(ctx) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index d82ad68a7..1dc1fe4a5 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -166,7 +166,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. oldUser, _ := request.UserFrom(ctx) httplog.LogOf(req, w).Addf("%v is acting as %v", oldUser, newUser) - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) audit.LogImpersonatedUser(ae, newUser) // clear all the impersonation headers from the request diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/mux_discovery_complete.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/mux_discovery_complete.go new file mode 100644 index 000000000..d2fee3b15 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/mux_discovery_complete.go @@ -0,0 +1,64 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filters + +import ( + "context" + "net/http" +) + +type muxAndDiscoveryIncompleteKeyType int + +const ( + // muxAndDiscoveryIncompleteKey is a key under which a protection signal for all requests made before the server has installed all known HTTP paths is stored in the request's context + muxAndDiscoveryIncompleteKey muxAndDiscoveryIncompleteKeyType = iota +) + +// NoMuxAndDiscoveryIncompleteKey checks if the context contains muxAndDiscoveryIncompleteKey. +// The presence of the key indicates the request has been made when the HTTP paths weren't installed. +func NoMuxAndDiscoveryIncompleteKey(ctx context.Context) bool { + muxAndDiscoveryCompleteProtectionKeyValue, _ := ctx.Value(muxAndDiscoveryIncompleteKey).(string) + return len(muxAndDiscoveryCompleteProtectionKeyValue) == 0 +} + +// WithMuxAndDiscoveryComplete puts the muxAndDiscoveryIncompleteKey in the context if a request has been made before muxAndDiscoveryCompleteSignal has been ready. +// Putting the key protects us from returning a 404 response instead of a 503. +// It is especially important for controllers like GC and NS since they act on 404s. +// +// The presence of the key is checked in the NotFoundHandler (staging/src/k8s.io/apiserver/pkg/util/notfoundhandler/not_found_handler.go) +// +// The primary reason this filter exists is to protect from a potential race between the client's requests reaching the NotFoundHandler and the server becoming ready. +// Without the protection key a request could still get a 404 response when the registered signals changed their status just slightly before reaching the new handler. +// In that case, the presence of the key will make the handler return a 503 instead of a 404. +func WithMuxAndDiscoveryComplete(handler http.Handler, muxAndDiscoveryCompleteSignal <-chan struct{}) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if muxAndDiscoveryCompleteSignal != nil && !isClosed(muxAndDiscoveryCompleteSignal) { + req = req.WithContext(context.WithValue(req.Context(), muxAndDiscoveryIncompleteKey, "MuxAndDiscoveryInstallationNotComplete")) + } + handler.ServeHTTP(w, req) + }) +} + +// isClosed is a convenience function that simply checks if the given chan has been closed +func isClosed(ch <-chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go index cc4997968..84255d525 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go @@ -27,14 +27,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilclock "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -47,13 +46,13 @@ const ( // auditWrapper provides an http.Handler that audits a failed request. // longRunning returns true if the given request is a long running request. // requestTimeoutMaximum specifies the default request timeout value.
-func WithRequestDeadline(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunning request.LongRunningRequestCheck, +func WithRequestDeadline(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunning request.LongRunningRequestCheck, negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration) http.Handler { - return withRequestDeadline(handler, sink, policy, longRunning, negotiatedSerializer, requestTimeoutMaximum, utilclock.RealClock{}) + return withRequestDeadline(handler, sink, policy, longRunning, negotiatedSerializer, requestTimeoutMaximum, clock.RealClock{}) } -func withRequestDeadline(handler http.Handler, sink audit.Sink, policy policy.Checker, longRunning request.LongRunningRequestCheck, - negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration, clock utilclock.PassiveClock) http.Handler { +func withRequestDeadline(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunning request.LongRunningRequestCheck, + negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration, clock clock.PassiveClock) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() @@ -104,29 +103,33 @@ func withRequestDeadline(handler http.Handler, sink audit.Sink, policy policy.Ch // withFailedRequestAudit decorates a failed http.Handler and is used to audit a failed request. // statusErr is used to populate the Message property of ResponseStatus. -func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.StatusError, sink audit.Sink, policy policy.Checker) http.Handler { +func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.StatusError, sink audit.Sink, policy audit.PolicyRuleEvaluator) http.Handler { if sink == nil || policy == nil { return failedHandler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req, ev, omitStages, err := createAuditEventAndAttachToContext(req, policy) + a, err := evaluatePolicyAndCreateAuditEvent(req, policy) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) return } + + ev := a.Event if ev == nil { failedHandler.ServeHTTP(w, req) return } + req = req.WithContext(audit.WithAuditContext(req.Context(), a)) + ev.ResponseStatus = &metav1.Status{} ev.Stage = auditinternal.StageResponseStarted if statusErr != nil { ev.ResponseStatus.Message = statusErr.Error() } - rw := decorateResponseWriter(req.Context(), w, ev, sink, omitStages) + rw := decorateResponseWriter(req.Context(), w, ev, sink, a.RequestAuditConfig.OmitStages) failedHandler.ServeHTTP(rw, req) }) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go index d9e7369f6..576bb3e7a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go @@ -19,18 +19,18 @@ package filters import ( "net/http" - utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/utils/clock" ) // WithRequestReceivedTimestamp attaches the ReceivedTimestamp (the time the request reached // the apiserver) to the context. 
func WithRequestReceivedTimestamp(handler http.Handler) http.Handler { - return withRequestReceivedTimestampWithClock(handler, utilclock.RealClock{}) + return withRequestReceivedTimestampWithClock(handler, clock.RealClock{}) } // The clock is passed as a parameter, handy for unit testing. -func withRequestReceivedTimestampWithClock(handler http.Handler, clock utilclock.PassiveClock) http.Handler { +func withRequestReceivedTimestampWithClock(handler http.Handler, clock clock.PassiveClock) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() req = req.WithContext(request.WithReceivedTimestamp(ctx, clock.Now())) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/webhook_duration.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/webhook_duration.go new file mode 100644 index 000000000..e81b340eb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/webhook_duration.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithWebhookDuration adds WebhookDuration trackers to the +// context associated with a request. +func WithWebhookDuration(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + req = req.WithContext(request.WithWebhookDuration(ctx)) + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index d5f7c414b..21f0b0273 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -43,6 +43,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" utiltrace "k8s.io/utils/trace" ) @@ -92,8 +93,6 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int return } - decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) - body, err := limitedReadBody(req, scope.MaxRequestBodyBytes) if err != nil { scope.err(err, w, req) @@ -116,12 +115,29 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int defaultGVK := scope.Kind original := r.New() + + validationDirective := fieldValidation(options.FieldValidation) + decodeSerializer := s.Serializer + if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict { + decodeSerializer = s.StrictSerializer + } + + decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion) trace.Step("About to convert to expected version") obj, gvk, err := decoder.Decode(body, &defaultGVK, original) if err != nil { - err = transformDecodeError(scope.Typer, err, original, gvk, body) - scope.err(err, w, req) - return + strictError, isStrictError := 
runtime.AsStrictDecodingError(err) + switch { + case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(req.Context(), strictError.Errors()) + case isStrictError && validationDirective == metav1.FieldValidationIgnore: + klog.Warningf("unexpected strict error when field validation is set to ignore") + fallthrough + default: + err = transformDecodeError(scope.Typer, err, original, gvk, body) + scope.err(err, w, req) + return + } } objGV := gvk.GroupVersion() @@ -141,9 +157,9 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int } ctx = request.WithNamespace(ctx, namespace) - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) - audit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) userInfo, _ := request.UserFrom(ctx) @@ -195,7 +211,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int code := http.StatusCreated status, ok := result.(*metav1.Status) - if ok && err == nil && status.Code == 0 { + if ok && status.Code == 0 { status.Code = int32(code) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 9b2b9b60a..05ccd3930 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -67,7 +67,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc defer cancel() ctx = request.WithNamespace(ctx, namespace) - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) @@ -103,9 +103,8 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc } trace.Step("Decoded delete options") - ae := request.AuditEventFrom(ctx) objGV := gvk.GroupVersion() - audit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) trace.Step("Recorded the audit event") } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { @@ -144,7 +143,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc // that will break existing clients. // Other cases where resource is not instantly deleted are: namespace deletion // and pod graceful deletion. 
- //lint:ignore SA1019 backwards compatibility + //nolint:staticcheck // SA1019 backwards compatibility //nolint: staticcheck if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents { status = http.StatusAccepted @@ -189,7 +188,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc defer cancel() ctx = request.WithNamespace(ctx, namespace) - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope) if err != nil { @@ -250,9 +249,8 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc return } - ae := request.AuditEventFrom(ctx) objGV := gvk.GroupVersion() - audit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) } else { if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil { err = errors.NewBadRequest(err.Error()) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index a4023a0e7..dde120d15 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -56,10 +56,18 @@ type Manager interface { // Update is used when the object has already been merged (non-apply // use-case), and simply updates the managed fields in the output // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) // Apply is used when server-side apply is called, as it merges the // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) } @@ -173,7 +181,6 @@ func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (o return newObj, nil } - internal.RemoveObjectManagedFields(liveObj) internal.RemoveObjectManagedFields(newObj) if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { @@ -194,8 +201,15 @@ func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager st obj, err := f.Update(liveObj, newObj, manager) if err != nil { atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + accessor, err := meta.Accessor(newObj) + if err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "VersionKind", - newObj.GetObjectKind().GroupVersionKind()) + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) }) // Explicitly remove managedFields on failure, so that // we can't have garbage in it. 
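A short, self-contained sketch (not part of the vendored change) of the pattern these hunks converge on: treat the caller's object as read-only, work on a deep copy, and clear metadata.managedFields through the accessor, as removeManagedFields in pkg/audit/request.go earlier in this diff and the managedFieldsUpdater change below both do. The stripManagedFields helper and the Pod literal are illustrative only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// stripManagedFields returns a deep copy of obj with metadata.managedFields
// cleared; the input object is left untouched.
func stripManagedFields(obj runtime.Object) (runtime.Object, error) {
	copied := obj.DeepCopyObject()
	accessor, err := meta.Accessor(copied)
	if err != nil {
		return nil, err
	}
	accessor.SetManagedFields(nil)
	return copied, nil
}

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:          "demo",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}

	stripped, err := stripManagedFields(pod)
	if err != nil {
		panic(err)
	}
	// The original keeps its entry; the copy has none.
	fmt.Println(len(pod.ManagedFields), len(stripped.(*corev1.Pod).ManagedFields))
}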
@@ -235,8 +249,6 @@ func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, return nil, fmt.Errorf("failed to decode managed fields: %v", err) } - internal.RemoveObjectManagedFields(liveObj) - object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) if err != nil { if conflicts, ok := err.(merge.Conflicts); ok { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go index 9bee82a85..83f92b8cc 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/managedfieldsupdater.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -80,7 +81,8 @@ func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} } if object == nil { - object = liveObj + object = liveObj.DeepCopyObject() + internal.RemoveObjectManagedFields(object) } return object, managed, nil } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go index 1be9e763a..213988e23 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -72,30 +72,41 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r }, nil } +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + // Update implements Manager. 
func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { newObjVersioned, err := f.toVersioned(newObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v (from %v to %v)", err, newObj.GetObjectKind().GroupVersionKind(), f.groupVersion) + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) } liveObjVersioned, err := f.toVersioned(liveObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err) + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) } liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) if err != nil { - return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err) + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) } apiVersion := fieldpath.APIVersion(f.groupVersion.String()) // TODO(apelisse) use the first return value when unions are implemented _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) if err != nil { - return nil, nil, fmt.Errorf("failed to update ManagedFields: %v", err) + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) } managed = internal.NewManaged(managedFields, managed.Times()) @@ -123,16 +134,16 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed liveObjVersioned, err := f.toVersioned(liveObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert live object to proper version: %v", err) + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) if err != nil { - return nil, nil, fmt.Errorf("failed to create typed patch object: %v", err) + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) } liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) if err != nil { - return nil, nil, fmt.Errorf("failed to create typed live object: %v", err) + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) } apiVersion := fieldpath.APIVersion(f.groupVersion.String()) @@ -148,18 +159,18 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed newObj, err := f.typeConverter.TypedToObject(newObjTyped) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new typed object to object: %v", err) + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) } newObjVersioned, err := f.toVersioned(newObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + return nil, nil, fmt.Errorf("failed to convert new object (%v) to 
proper version: %v", objectGVKNN(patchObj), err) } f.objectDefaulter.Default(newObjVersioned) newObjUnversioned, err := f.toUnversioned(newObjVersioned) if err != nil { - return nil, nil, fmt.Errorf("failed to convert to unversioned: %v", err) + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) } return newObjUnversioned, managed, nil } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go index 8bb2f0c9e..fc40546f1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/typed" "sigs.k8s.io/structured-merge-diff/v4/value" @@ -65,7 +65,7 @@ func (DeducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Obje } type typeConverter struct { - parser *internal.GvkParser + parser *managedfields.GvkParser } var _ TypeConverter = &typeConverter{} @@ -74,7 +74,7 @@ var _ TypeConverter = &typeConverter{} // will automatically find the proper version of the object, and the // corresponding schema information. func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConverter, error) { - parser, err := internal.NewGVKParser(models, preserveUnknownFields) + parser, err := managedfields.NewGVKParser(models, preserveUnknownFields) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go index 755da22ee..2eb0c174d 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go @@ -76,15 +76,11 @@ func (n ContextBasedNaming) Name(req *http.Request) (namespace, name string, err if !ok { return "", "", fmt.Errorf("missing requestInfo") } - ns, err := n.Namespace(req) - if err != nil { - return "", "", err - } if len(requestInfo.Name) == 0 { return "", "", errEmptyName } - return ns, requestInfo.Name, nil + return requestInfo.Namespace, requestInfo.Name, nil } // fastURLPathEncode encodes the provided path as a URL path diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 1abcccaa1..6803baaa6 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -23,6 +23,8 @@ import ( "strings" "time" + kjson "sigs.k8s.io/json" + jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -33,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -123,10 +124,10 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac } options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PatchOptions")) - ae := request.AuditEventFrom(ctx) 
+ ae := audit.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) - audit.LogRequestPatch(ae, patchBytes) + audit.LogRequestPatch(req.Context(), patchBytes) trace.Step("Recorded the audit event") baseContentType := runtime.ContentTypeJSON @@ -140,9 +141,15 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac } gv := scope.Kind.GroupVersion() + validationDirective := fieldValidation(options.FieldValidation) + decodeSerializer := s.Serializer + if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict { + decodeSerializer = s.StrictSerializer + } + codec := runtime.NewCodec( scope.Serializer.EncoderForVersion(s.Serializer, gv), - scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion), + scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion), ) userInfo, _ := request.UserFrom(ctx) @@ -190,15 +197,16 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac } p := patcher{ - namer: scope.Namer, - creater: scope.Creater, - defaulter: scope.Defaulter, - typer: scope.Typer, - unsafeConvertor: scope.UnsafeConvertor, - kind: scope.Kind, - resource: scope.Resource, - subresource: scope.Subresource, - dryRun: dryrun.IsDryRun(options.DryRun), + namer: scope.Namer, + creater: scope.Creater, + defaulter: scope.Defaulter, + typer: scope.Typer, + unsafeConvertor: scope.UnsafeConvertor, + kind: scope.Kind, + resource: scope.Resource, + subresource: scope.Subresource, + dryRun: dryrun.IsDryRun(options.DryRun), + validationDirective: validationDirective, objectInterfaces: scope, @@ -251,15 +259,16 @@ type mutateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) e // moved into this type. type patcher struct { // Pieces of RequestScope - namer ScopeNamer - creater runtime.ObjectCreater - defaulter runtime.ObjectDefaulter - typer runtime.ObjectTyper - unsafeConvertor runtime.ObjectConvertor - resource schema.GroupVersionResource - kind schema.GroupVersionKind - subresource string - dryRun bool + namer ScopeNamer + creater runtime.ObjectCreater + defaulter runtime.ObjectDefaulter + typer runtime.ObjectTyper + unsafeConvertor runtime.ObjectConvertor + resource schema.GroupVersionResource + kind schema.GroupVersionKind + subresource string + dryRun bool + validationDirective string objectInterfaces admission.ObjectInterfaces @@ -291,8 +300,8 @@ type patcher struct { } type patchMechanism interface { - applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) - createNewObject() (runtime.Object, error) + applyPatchToCurrentObject(requextContext context.Context, currentObject runtime.Object) (runtime.Object, error) + createNewObject(requestContext context.Context) (runtime.Object, error) } type jsonPatcher struct { @@ -301,7 +310,7 @@ type jsonPatcher struct { fieldManager *fieldmanager.FieldManager } -func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { +func (p *jsonPatcher) applyPatchToCurrentObject(requestContext context.Context, currentObject runtime.Object) (runtime.Object, error) { // Encode will convert & return a versioned object in JSON. currentObjJS, err := runtime.Encode(p.codec, currentObject) if err != nil { @@ -309,7 +318,7 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r } // Apply the patch. 
- patchedObjJS, err := p.applyJSPatch(currentObjJS) + patchedObjJS, appliedStrictErrs, err := p.applyJSPatch(currentObjJS) if err != nil { return nil, err } @@ -317,9 +326,32 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r // Construct the resulting typed, unversioned object. objToUpdate := p.restPatcher.New() if err := runtime.DecodeInto(p.codec, patchedObjJS, objToUpdate); err != nil { - return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ - field.Invalid(field.NewPath("patch"), string(patchedObjJS), err.Error()), - }) + strictError, isStrictError := runtime.AsStrictDecodingError(err) + switch { + case !isStrictError: + // disregard any appliedStrictErrs, because it's an incomplete + // list of strict errors given that we don't know what fields were + // unknown because DecodeInto failed. Non-strict errors trump in this case. + return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), string(patchedObjJS), err.Error()), + }) + case p.validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(requestContext, append(appliedStrictErrs, strictError.Errors()...)) + default: + strictDecodingError := runtime.NewStrictDecodingError(append(appliedStrictErrs, strictError.Errors()...)) + return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), string(patchedObjJS), strictDecodingError.Error()), + }) + } + } else if len(appliedStrictErrs) > 0 { + switch { + case p.validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(requestContext, appliedStrictErrs) + default: + return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), string(patchedObjJS), runtime.NewStrictDecodingError(appliedStrictErrs).Error()), + }) + } } if p.fieldManager != nil { @@ -328,52 +360,65 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r return objToUpdate, nil } -func (p *jsonPatcher) createNewObject() (runtime.Object, error) { +func (p *jsonPatcher) createNewObject(_ context.Context) (runtime.Object, error) { return nil, errors.NewNotFound(p.resource.GroupResource(), p.name) } +type jsonPatchOp struct { + Op string `json:"op"` + Path string `json:"path"` + From string `json:"from"` + Value interface{} `json:"value"` +} + // applyJSPatch applies the patch. Input and output objects must both have // the external version, since that is what the patch must have been constructed against. 
-func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) { +func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, strictErrors []error, retErr error) { switch p.patchType { case types.JSONPatchType: - // sanity check potentially abusive patches - // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) - if len(p.patchBytes) > 1024*1024 { - v := []interface{}{} - if err := json.Unmarshal(p.patchBytes, &v); err != nil { - return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn { + var v []jsonPatchOp + var err error + if strictErrors, err = kjson.UnmarshalStrict(p.patchBytes, &v); err != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } + for i, e := range strictErrors { + strictErrors[i] = fmt.Errorf("json patch %v", e) } } patchObj, err := jsonpatch.DecodePatch(p.patchBytes) if err != nil { - return nil, errors.NewBadRequest(err.Error()) + return nil, nil, errors.NewBadRequest(err.Error()) } if len(patchObj) > maxJSONPatchOperations { - return nil, errors.NewRequestEntityTooLargeError( + return nil, nil, errors.NewRequestEntityTooLargeError( fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", maxJSONPatchOperations, len(patchObj))) } patchedJS, err := patchObj.Apply(versionedJS) if err != nil { - return nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + return nil, nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) } - return patchedJS, nil + return patchedJS, strictErrors, nil case types.MergePatchType: - // sanity check potentially abusive patches - // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) - if len(p.patchBytes) > 1024*1024 { + if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn { v := map[string]interface{}{} - if err := json.Unmarshal(p.patchBytes, &v); err != nil { - return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + var err error + strictErrors, err = kjson.UnmarshalStrict(p.patchBytes, &v) + if err != nil { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) } } - return jsonpatch.MergePatch(versionedJS, p.patchBytes) + patchedJS, retErr = jsonpatch.MergePatch(versionedJS, p.patchBytes) + if retErr == jsonpatch.ErrBadJSONPatch { + return nil, nil, errors.NewBadRequest(retErr.Error()) + } + return patchedJS, strictErrors, retErr default: // only here as a safety net - go-restful filters content-type - return nil, fmt.Errorf("unknown Content-Type header for patch: %v", p.patchType) + return nil, nil, fmt.Errorf("unknown Content-Type header for patch: %v", p.patchType) } } @@ -385,7 +430,7 @@ type smpPatcher struct { fieldManager *fieldmanager.FieldManager } -func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (runtime.Object, error) { +func (p *smpPatcher) applyPatchToCurrentObject(requestContext context.Context, currentObject runtime.Object) (runtime.Object, error) { // Since the patch is applied on versioned objects, we need to convert the // current object to versioned representation first. 
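applyJSPatch above now strictly decodes JSON and merge patches when the directive is Warn or Strict, collecting strict errors (for example unknown members in a patch operation) separately from fatal ones. A condensed stand-alone version of the JSON-patch check using only the standard library; `jsonPatchOp` mirrors the struct introduced above, and `decodePatchOps` is an illustrative name rather than the apiserver's helper:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// jsonPatchOp mirrors the shape used above; the field names are the standard
// RFC 6902 members.
type jsonPatchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	From  string      `json:"from"`
	Value interface{} `json:"value"`
}

// decodePatchOps reports unknown members in patch operations, the kind of
// check a Warn/Strict directive enables for JSON patches.
func decodePatchOps(patch []byte) ([]jsonPatchOp, error) {
	var ops []jsonPatchOp
	dec := json.NewDecoder(bytes.NewReader(patch))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&ops); err != nil {
		return nil, fmt.Errorf("json patch %v", err)
	}
	return ops, nil
}

func main() {
	_, err := decodePatchOps([]byte(`[{"op":"add","path":"/spec/x","value":1,"vaule":2}]`))
	fmt.Println(err) // the misspelled "vaule" member is reported as an unknown field
}
```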
currentVersionedObject, err := p.unsafeConvertor.ConvertToVersion(currentObject, p.kind.GroupVersion()) @@ -396,7 +441,7 @@ func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (ru if err != nil { return nil, err } - if err := strategicPatchObject(p.defaulter, currentVersionedObject, p.patchBytes, versionedObjToUpdate, p.schemaReferenceObj); err != nil { + if err := strategicPatchObject(requestContext, p.defaulter, currentVersionedObject, p.patchBytes, versionedObjToUpdate, p.schemaReferenceObj, p.validationDirective); err != nil { return nil, err } // Convert the object back to the hub version @@ -411,20 +456,21 @@ func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (ru return newObj, nil } -func (p *smpPatcher) createNewObject() (runtime.Object, error) { +func (p *smpPatcher) createNewObject(_ context.Context) (runtime.Object, error) { return nil, errors.NewNotFound(p.resource.GroupResource(), p.name) } type applyPatcher struct { - patch []byte - options *metav1.PatchOptions - creater runtime.ObjectCreater - kind schema.GroupVersionKind - fieldManager *fieldmanager.FieldManager - userAgent string + patch []byte + options *metav1.PatchOptions + creater runtime.ObjectCreater + kind schema.GroupVersionKind + fieldManager *fieldmanager.FieldManager + userAgent string + validationDirective string } -func (p *applyPatcher) applyPatchToCurrentObject(obj runtime.Object) (runtime.Object, error) { +func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, obj runtime.Object) (runtime.Object, error) { force := false if p.options.Force != nil { force = *p.options.Force @@ -438,15 +484,30 @@ func (p *applyPatcher) applyPatchToCurrentObject(obj runtime.Object) (runtime.Ob return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err)) } - return p.fieldManager.Apply(obj, patchObj, p.options.FieldManager, force) + obj, err := p.fieldManager.Apply(obj, patchObj, p.options.FieldManager, force) + if err != nil { + return obj, err + } + + // TODO: spawn something to track deciding whether a fieldValidation=Strict + // fatal error should return before an error from the apply operation + if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn { + if err := yaml.UnmarshalStrict(p.patch, &map[string]interface{}{}); err != nil { + if p.validationDirective == metav1.FieldValidationStrict { + return nil, errors.NewBadRequest(fmt.Sprintf("error strict decoding YAML: %v", err)) + } + addStrictDecodingWarnings(requestContext, []error{err}) + } + } + return obj, nil } -func (p *applyPatcher) createNewObject() (runtime.Object, error) { +func (p *applyPatcher) createNewObject(requestContext context.Context) (runtime.Object, error) { obj, err := p.creater.New(p.kind) if err != nil { return nil, fmt.Errorf("failed to create new object: %v", err) } - return p.applyPatchToCurrentObject(obj) + return p.applyPatchToCurrentObject(requestContext, obj) } // strategicPatchObject applies a strategic merge patch of to @@ -455,11 +516,13 @@ func (p *applyPatcher) createNewObject() (runtime.Object, error) { // and . // NOTE: Both and are supposed to be versioned. 
func strategicPatchObject( + requestContext context.Context, defaulter runtime.ObjectDefaulter, originalObject runtime.Object, patchBytes []byte, objToUpdate runtime.Object, schemaReferenceObj runtime.Object, + validationDirective string, ) error { originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject) if err != nil { @@ -467,11 +530,19 @@ func strategicPatchObject( } patchMap := make(map[string]interface{}) - if err := json.Unmarshal(patchBytes, &patchMap); err != nil { - return errors.NewBadRequest(err.Error()) + var strictErrs []error + if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict { + strictErrs, err = kjson.UnmarshalStrict(patchBytes, &patchMap) + if err != nil { + return errors.NewBadRequest(err.Error()) + } + } else { + if err = kjson.UnmarshalCaseSensitivePreserveInts(patchBytes, &patchMap); err != nil { + return errors.NewBadRequest(err.Error()) + } } - if err := applyPatchToObject(defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj); err != nil { + if err := applyPatchToObject(requestContext, defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj, strictErrs, validationDirective); err != nil { return err } return nil @@ -480,16 +551,16 @@ func strategicPatchObject( // applyPatch is called every time GuaranteedUpdate asks for the updated object, // and is given the currently persisted object as input. // TODO: rename this function because the name implies it is related to applyPatcher -func (p *patcher) applyPatch(_ context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) { +func (p *patcher) applyPatch(ctx context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) { // Make sure we actually have a persisted currentObject p.trace.Step("About to apply patch") currentObjectHasUID, err := hasUID(currentObject) if err != nil { return nil, err } else if !currentObjectHasUID { - objToUpdate, patchErr = p.mechanism.createNewObject() + objToUpdate, patchErr = p.mechanism.createNewObject(ctx) } else { - objToUpdate, patchErr = p.mechanism.applyPatchToCurrentObject(currentObject) + objToUpdate, patchErr = p.mechanism.applyPatchToCurrentObject(ctx, currentObject) } if patchErr != nil { @@ -565,12 +636,13 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti // this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type case types.ApplyPatchType: p.mechanism = &applyPatcher{ - fieldManager: scope.FieldManager, - patch: p.patchBytes, - options: p.options, - creater: p.creater, - kind: p.kind, - userAgent: p.userAgent, + fieldManager: scope.FieldManager, + patch: p.patchBytes, + options: p.options, + creater: p.creater, + kind: p.kind, + userAgent: p.userAgent, + validationDirective: p.validationDirective, } p.forceAllowCreate = true default: @@ -592,6 +664,7 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti return updateObject, updateErr } result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { + result, err := requestFunc() // If the object wasn't committed to storage because it's serialized size was too large, // it is safe to remove managedFields (which can be large) and try again. @@ -618,11 +691,14 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti // and stores the result in . // NOTE: must be a versioned object. 
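strategicPatchObject above decodes the patch map with sigs.k8s.io/json, which reports strict violations separately from fatal decode errors, so a handler can fail on the former and merely warn on the latter depending on the directive. A small usage sketch, assuming the module is available on the import path and that the default strict checks flag duplicate keys:

```go
package main

import (
	"fmt"

	kjson "sigs.k8s.io/json"
)

func main() {
	patch := []byte(`{"spec":{"replicas":2,"replicas":3}}`)
	patchMap := map[string]interface{}{}

	// UnmarshalStrict returns fatal decode errors separately from strict
	// violations, so valid-but-suspicious JSON still yields a usable map.
	strictErrs, err := kjson.UnmarshalStrict(patch, &patchMap)
	fmt.Println(err)        // nil: the document is syntactically valid JSON
	fmt.Println(strictErrs) // expected to report the duplicate "replicas" key
}
```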
func applyPatchToObject( + requestContext context.Context, defaulter runtime.ObjectDefaulter, originalMap map[string]interface{}, patchMap map[string]interface{}, objToUpdate runtime.Object, schemaReferenceObj runtime.Object, + strictErrs []error, + validationDirective string, ) error { patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj) if err != nil { @@ -630,11 +706,38 @@ func applyPatchToObject( } // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(patchedObjMap, objToUpdate); err != nil { - return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ - field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()), - }) + converter := runtime.DefaultUnstructuredConverter + returnUnknownFields := validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict + if err := converter.FromUnstructuredWithValidation(patchedObjMap, objToUpdate, returnUnknownFields); err != nil { + strictError, isStrictError := runtime.AsStrictDecodingError(err) + switch { + case !isStrictError: + // disregard any sttrictErrs, because it's an incomplete + // list of strict errors given that we don't know what fields were + // unknown because StrategicMergeMapPatch failed. + // Non-strict errors trump in this case. + return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()), + }) + case validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(requestContext, append(strictErrs, strictError.Errors()...)) + default: + strictDecodingError := runtime.NewStrictDecodingError(append(strictErrs, strictError.Errors()...)) + return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), strictDecodingError.Error()), + }) + } + } else if len(strictErrs) > 0 { + switch { + case validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(requestContext, strictErrs) + default: + return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()), + }) + } } + // Decoding from JSON to a versioned object would apply defaults, so we do the same here defaulter.Default(objToUpdate) @@ -659,8 +762,9 @@ func patchToUpdateOptions(po *metav1.PatchOptions) *metav1.UpdateOptions { return nil } uo := &metav1.UpdateOptions{ - DryRun: po.DryRun, - FieldManager: po.FieldManager, + DryRun: po.DryRun, + FieldManager: po.FieldManager, + FieldValidation: po.FieldValidation, } uo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions")) return uo @@ -672,8 +776,9 @@ func patchToCreateOptions(po *metav1.PatchOptions) *metav1.CreateOptions { return nil } co := &metav1.CreateOptions{ - DryRun: po.DryRun, - FieldManager: po.FieldManager, + DryRun: po.DryRun, + FieldManager: po.FieldManager, + FieldValidation: po.FieldValidation, } co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) return co diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go index 16e16c353..0e4b98b83 100644 --- 
a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go @@ -201,7 +201,8 @@ func (w *deferredResponseWriter) Write(p []byte) (n int, err error) { w.trace.Step("Write call finished", utiltrace.Field{"writer", fmt.Sprintf("%T", w.w)}, utiltrace.Field{"size", len(p)}, - utiltrace.Field{"firstWrite", firstWrite}) + utiltrace.Field{"firstWrite", firstWrite}, + utiltrace.Field{"err", err}) }() } if w.hasWritten { @@ -267,9 +268,7 @@ func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiat return } - if ae := request.AuditEventFrom(req.Context()); ae != nil { - audit.LogResponseObject(ae, object, gv, s) - } + audit.LogResponseObject(req.Context(), object, gv, s) encoder := s.EncoderForVersion(serializer.Serializer, gv) SerializeObject(serializer.MediaType, encoder, w, req, statusCode, object) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 4c2e63e10..c5fd95e89 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -39,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" @@ -63,6 +64,10 @@ const ( // NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission. // For PATCH request the API server only dedups after mutating admission. DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = ".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v" + // shortPrefix is one possible beginning of yaml unmarshal strict errors. + shortPrefix = "yaml: unmarshal errors:\n" + // longPrefix is the other possible beginning of yaml unmarshal strict errors. + longPrefix = "error converting YAML to JSON: yaml: unmarshal errors:\n" ) // RequestScope encapsulates common fields across all RESTful handler methods. @@ -187,7 +192,7 @@ func ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admiss } ctx := req.Context() ctx = request.WithNamespace(ctx, namespace) - ae := request.AuditEventFrom(ctx) + ae := audit.AuditEventFrom(ctx) admit = admission.WithAudit(admit, ae) opts, subpath, subpathKey := connecter.NewConnectOptions() @@ -456,6 +461,53 @@ func isDryRun(url *url.URL) bool { return len(url.Query()["dryRun"]) != 0 } +// fieldValidation checks that the field validation feature is enabled +// and returns a valid directive of either +// - Ignore (default when feature is disabled) +// - Warn (default when feature is enabled) +// - Strict +func fieldValidation(directive string) string { + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideFieldValidation) { + return metav1.FieldValidationIgnore + } + if directive == "" { + return metav1.FieldValidationWarn + } + return directive +} + +// parseYAMLWarnings takes the strict decoding errors from the yaml decoder's output +// and parses each individual warnings, or leaves the warning as is if +// it does not look like a yaml strict decoding error. 
+func parseYAMLWarnings(errString string) []string { + var trimmedString string + if trimmedShortString := strings.TrimPrefix(errString, shortPrefix); len(trimmedShortString) < len(errString) { + trimmedString = trimmedShortString + } else if trimmedLongString := strings.TrimPrefix(errString, longPrefix); len(trimmedLongString) < len(errString) { + trimmedString = trimmedLongString + } else { + // not a yaml error, return as-is + return []string{errString} + } + + splitStrings := strings.Split(trimmedString, "\n") + for i, s := range splitStrings { + splitStrings[i] = strings.TrimSpace(s) + } + return splitStrings +} + +// addStrictDecodingWarnings confirms that the error is a strict decoding error +// and if so adds a warning for each strict decoding violation. +func addStrictDecodingWarnings(requestContext context.Context, errs []error) { + for _, e := range errs { + yamlWarnings := parseYAMLWarnings(e.Error()) + for _, w := range yamlWarnings { + warning.AddWarning(requestContext, "", w) + } + } +} + type etcdError interface { Code() grpccodes.Code Error() string diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index e0dbd54a5..96eda0b9c 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -41,6 +41,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" utiltrace "k8s.io/utils/trace" ) @@ -102,14 +103,30 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa defaultGVK := scope.Kind original := r.New() + validationDirective := fieldValidation(options.FieldValidation) + decodeSerializer := s.Serializer + if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict { + decodeSerializer = s.StrictSerializer + } + + decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion) trace.Step("About to convert to expected version") - decoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion) obj, gvk, err := decoder.Decode(body, &defaultGVK, original) if err != nil { - err = transformDecodeError(scope.Typer, err, original, gvk, body) - scope.err(err, w, req) - return + strictError, isStrictError := runtime.AsStrictDecodingError(err) + switch { + case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn: + addStrictDecodingWarnings(req.Context(), strictError.Errors()) + case isStrictError && validationDirective == metav1.FieldValidationIgnore: + klog.Warningf("unexpected strict error when field validation is set to ignore") + fallthrough + default: + err = transformDecodeError(scope.Typer, err, original, gvk, body) + scope.err(err, w, req) + return + } } + objGV := gvk.GroupVersion() if !scope.AcceptsGroupVersion(objGV) { err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion())) @@ -118,8 +135,8 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa } trace.Step("Conversion done") - ae := request.AuditEventFrom(ctx) - audit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) + ae := audit.AuditEventFrom(ctx) + audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer) admit = 
admission.WithAudit(admit, ae) if err := checkName(obj, name, namespace, scope.Namer); err != nil { @@ -266,8 +283,9 @@ func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions { return nil } co := &metav1.CreateOptions{ - DryRun: uo.DryRun, - FieldManager: uo.FieldManager, + DryRun: uo.DryRun, + FieldManager: uo.FieldManager, + FieldValidation: uo.FieldValidation, } co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) return co diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index 47fe44b78..818d94ae4 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/metrics" - "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/wsstream" "golang.org/x/net/websocket" @@ -166,8 +165,6 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { metrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() defer metrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Dec() - w = httplog.Unlogged(req, w) - if wsstream.IsWebSocketRequest(req) { w.Header().Set("Content-Type", s.MediaType) websocket.Handler(s.HandleWS).ServeHTTP(w, req) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 0902add48..13d703afb 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -217,7 +217,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag isSubresource := len(subresource) > 0 // If there is a subresource, namespace scoping is defined by the parent resource - namespaceScoped := true + var namespaceScoped bool if isSubresource { parentStorage, ok := a.group.Storage[resource] if !ok { @@ -541,6 +541,11 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } } + var disabledParams []string + if !utilfeature.DefaultFeatureGate.Enabled(features.ServerSideFieldValidation) { + disabledParams = []string{"fieldValidation"} + } + // Create Routes for the actions. // TODO: Add status documentation using Returns() // Errors (see api/errors/errors.go as well as go-restful router): @@ -771,7 +776,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Returns(http.StatusCreated, "Created", producedObject). Reads(defaultVersionedObject). Writes(producedObject) - if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil { + if err := AddObjectParams(ws, route, versionedUpdateOptions, disabledParams...); err != nil { return nil, nil, err } addParams(route, action.Params) @@ -802,7 +807,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Returns(http.StatusCreated, "Created", producedObject). Reads(metav1.Patch{}). 
Writes(producedObject) - if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil { + if err := AddObjectParams(ws, route, versionedPatchOptions, disabledParams...); err != nil { return nil, nil, err } addParams(route, action.Params) @@ -833,7 +838,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Returns(http.StatusAccepted, "Accepted", producedObject). Reads(defaultVersionedObject). Writes(producedObject) - if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil { + if err := AddObjectParams(ws, route, versionedCreateOptions, disabledParams...); err != nil { return nil, nil, err } addParams(route, action.Params) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 7e0715eb5..7dd40a373 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -17,9 +17,7 @@ limitations under the License. package metrics import ( - "bufio" "context" - "net" "net/http" "net/url" "strconv" @@ -34,6 +32,7 @@ import ( "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/endpoints/responsewriter" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" compbasemetrics "k8s.io/component-base/metrics" @@ -80,14 +79,23 @@ var ( }, []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code"}, ) - longRunningRequestGauge = compbasemetrics.NewGaugeVec( + longRunningRequestsGauge = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ - Name: "apiserver_longrunning_gauge", + Name: "apiserver_longrunning_requests", Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. Not all requests are tracked this way.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) + longRunningRequestGauge = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Name: "apiserver_longrunning_gauge", + Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. Not all requests are tracked this way.", + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.23.0", + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, + ) requestLatencies = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_request_duration_seconds", @@ -95,19 +103,32 @@ var ( // This metric is used for verifying api call latencies SLO, // as well as tracking regressions in this aspects. // Thus we customize buckets significantly, to empower both usecases. 
- Buckets: []float64{0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, - 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3, + 4, 5, 6, 8, 10, 15, 20, 30, 45, 60}, StabilityLevel: compbasemetrics.STABLE, }, []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component"}, ) + requestSloLatencies = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_request_slo_duration_seconds", + Help: "Response latency distribution (not counting webhook duration) in seconds for each verb, group, version, resource, subresource, scope and component.", + // This metric is supplementary to the requestLatencies metric. + // It measures request duration excluding webhooks as they are mostly + // dependant on user configuration. + Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3, + 4, 5, 6, 8, 10, 15, 20, 30, 45, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, + ) responseSizes = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_response_sizes", Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.", // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). Buckets: compbasemetrics.ExponentialBuckets(1000, 10.0, 7), - StabilityLevel: compbasemetrics.ALPHA, + StabilityLevel: compbasemetrics.STABLE, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) @@ -131,9 +152,10 @@ var ( // RegisteredWatchers is a number of currently registered watchers splitted by resource. RegisteredWatchers = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ - Name: "apiserver_registered_watchers", - Help: "Number of currently registered watchers for a given resources", - StabilityLevel: compbasemetrics.ALPHA, + Name: "apiserver_registered_watchers", + Help: "Number of currently registered watchers for a given resources", + StabilityLevel: compbasemetrics.ALPHA, + DeprecatedVersion: "1.23.0", }, []string{"group", "version", "kind"}, ) @@ -160,7 +182,7 @@ var ( &compbasemetrics.GaugeOpts{ Name: "apiserver_current_inflight_requests", Help: "Maximal number of currently used inflight request limit of this apiserver per request kind in last second.", - StabilityLevel: compbasemetrics.ALPHA, + StabilityLevel: compbasemetrics.STABLE, }, []string{"request_kind"}, ) @@ -234,8 +256,10 @@ var ( metrics = []resettableCollector{ deprecatedRequestGauge, requestCounter, + longRunningRequestsGauge, longRunningRequestGauge, requestLatencies, + requestSloLatencies, responseSizes, DroppedRequests, TLSHandshakeErrors, @@ -370,7 +394,7 @@ func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) { } scope := CleanScope(requestInfo) - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) resource := requestInfo.Resource subresource := requestInfo.Subresource group := requestInfo.APIGroup @@ -393,7 +417,7 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). 
// However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) if requestInfo.IsResourceRequest { requestTerminationsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() @@ -408,22 +432,28 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } - var g compbasemetrics.GaugeMetric + var g, e compbasemetrics.GaugeMetric scope := CleanScope(requestInfo) // We don't use verb from , as this may be propagated from // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), getVerbIfWatch(req), req) if requestInfo.IsResourceRequest { + e = longRunningRequestsGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) g = longRunningRequestGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) } else { + e = longRunningRequestsGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component) g = longRunningRequestGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component) } + e.Inc() g.Inc() - defer g.Dec() + defer func() { + e.Dec() + g.Dec() + }() fn() } @@ -434,7 +464,7 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), verb, req) dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() @@ -452,6 +482,10 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour } } requestLatencies.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds) + if wd, ok := request.WebhookDurationFrom(req.Context()); ok { + sloLatency := elapsedSeconds - (wd.AdmitTracker.GetLatency() + wd.ValidateTracker.GetLatency()).Seconds() + requestSloLatencies.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(sloLatency) + } // We are only interested in response sizes of read requests. 
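The new apiserver_request_slo_duration_seconds observation above subtracts webhook time from the total request latency: mutating webhook latencies are summed because they run sequentially, and validating webhook latencies are taken as a maximum because they run in parallel. A toy calculation of that quantity:

```go
package main

import (
	"fmt"
	"time"
)

// sloDuration subtracts time attributable to admission webhooks from the
// total request duration, so the resulting metric reflects work the
// apiserver itself performed rather than user-configured webhooks.
func sloDuration(elapsed, admitSum, validateMax time.Duration) float64 {
	return (elapsed - (admitSum + validateMax)).Seconds()
}

func main() {
	// 900ms total, 200ms spent across sequential mutating webhooks (summed),
	// 300ms in the slowest validating webhook (max of the parallel set).
	fmt.Println(sloDuration(900*time.Millisecond, 200*time.Millisecond, 300*time.Millisecond)) // 0.4
}
```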
if verb == "GET" || verb == "LIST" { responseSizes.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(float64(respSize)) @@ -469,16 +503,7 @@ func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, com delegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter} - //lint:file-ignore SA1019 Keep supporting deprecated http.CloseNotifier - _, cn := response.ResponseWriter.(http.CloseNotifier) - _, fl := response.ResponseWriter.(http.Flusher) - _, hj := response.ResponseWriter.(http.Hijacker) - var rw http.ResponseWriter - if cn && fl && hj { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } + rw := responsewriter.WrapForHTTP1Or2(delegate) response.ResponseWriter = rw routeFunc(req, response) @@ -496,15 +521,7 @@ func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, c } delegate := &ResponseWriterDelegator{ResponseWriter: w} - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - if cn && fl && hj { - w = &fancyResponseWriterDelegator{delegate} - } else { - w = delegate - } + w = responsewriter.WrapForHTTP1Or2(delegate) handler(w, req) @@ -545,13 +562,8 @@ func CanonicalVerb(verb string, scope string) string { // LIST and APPLY from PATCH. func CleanVerb(verb string, request *http.Request) string { reportedVerb := verb - if verb == "LIST" { - // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool - if values := request.URL.Query()["watch"]; len(values) > 0 { - if value := strings.ToLower(values[0]); value != "0" && value != "false" { - reportedVerb = "WATCH" - } - } + if verb == "LIST" && checkIfWatch(request) { + reportedVerb = "WATCH" } // normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics if verb == "WATCHLIST" { @@ -564,14 +576,44 @@ func CleanVerb(verb string, request *http.Request) string { } // cleanVerb additionally ensures that unknown verbs don't clog up the metrics. -func cleanVerb(verb string, request *http.Request) string { - reportedVerb := CleanVerb(verb, request) +func cleanVerb(verb, suggestedVerb string, request *http.Request) string { + // CanonicalVerb (being an input for this function) doesn't handle correctly the + // deprecated path pattern for watch of: + // GET /api/{version}/watch/{resource} + // We correct it manually based on the pass verb from the installer. 
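The verb-normalization helpers introduced below (checkIfWatch, getVerbIfWatch) reclassify a GET or LIST carrying a truthy ?watch= parameter as WATCH, using the same string-to-bool rule as Convert_Slice_string_To_bool. A minimal sketch of that query check; `isWatch` is an illustrative name:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// isWatch reports whether a request URL asks for a watch: any ?watch=
// value other than "0" or "false" counts as true.
func isWatch(u *url.URL) bool {
	values := u.Query()["watch"]
	if len(values) == 0 {
		return false
	}
	v := strings.ToLower(values[0])
	return v != "0" && v != "false"
}

func main() {
	u, _ := url.Parse("https://host/api/v1/pods?watch=true")
	fmt.Println(isWatch(u)) // true: reported as WATCH rather than LIST
}
```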
+ var reportedVerb string + if suggestedVerb == "WATCH" || suggestedVerb == "WATCHLIST" { + reportedVerb = "WATCH" + } else { + reportedVerb = CleanVerb(verb, request) + } if validRequestMethods.Has(reportedVerb) { return reportedVerb } return OtherRequestMethod } +//getVerbIfWatch additionally ensures that GET or List would be transformed to WATCH +func getVerbIfWatch(req *http.Request) string { + if strings.ToUpper(req.Method) == "GET" || strings.ToUpper(req.Method) == "LIST" { + if checkIfWatch(req) { + return "WATCH" + } + } + return "" +} + +//checkIfWatch check request is watch +func checkIfWatch(req *http.Request) bool { + // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool + if values := req.URL.Query()["watch"]; len(values) > 0 { + if value := strings.ToLower(values[0]); value != "0" && value != "false" { + return true + } + } + return false +} + func cleanDryRun(u *url.URL) string { // avoid allocating when we don't see dryRun in the query if !strings.Contains(u.RawQuery, "dryRun") { @@ -588,6 +630,9 @@ func cleanDryRun(u *url.URL) string { return strings.Join(utilsets.NewString(dryRun...).List(), ",") } +var _ http.ResponseWriter = (*ResponseWriterDelegator)(nil) +var _ responsewriter.UserProvidedDecorator = (*ResponseWriterDelegator)(nil) + // ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc. type ResponseWriterDelegator struct { http.ResponseWriter @@ -597,6 +642,10 @@ type ResponseWriterDelegator struct { wroteHeader bool } +func (r *ResponseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + func (r *ResponseWriterDelegator) WriteHeader(code int) { r.status = code r.wroteHeader = true @@ -620,22 +669,6 @@ func (r *ResponseWriterDelegator) ContentLength() int { return int(r.written) } -type fancyResponseWriterDelegator struct { - *ResponseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - // Small optimization over Itoa func codeToString(s int) string { switch s { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go index 95166f5c4..8f4e60f54 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -20,7 +20,6 @@ import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/authentication/user" ) @@ -33,9 +32,6 @@ const ( // userKey is the context key for the request user. userKey - - // auditKey is the context key for the audit event. - auditKey ) // NewContext instantiates a base context object for request flows. @@ -80,14 +76,3 @@ func UserFrom(ctx context.Context) (user.Info, bool) { user, ok := ctx.Value(userKey).(user.Info) return user, ok } - -// WithAuditEvent returns set audit event struct. 
-func WithAuditEvent(parent context.Context, ev *audit.Event) context.Context { - return WithValue(parent, auditKey, ev) -} - -// AuditEventFrom returns the audit event struct on the ctx -func AuditEventFrom(ctx context.Context) *audit.Event { - ev, _ := ctx.Value(auditKey).(*audit.Event) - return ev -} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go new file mode 100644 index 000000000..209b33cdf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +func sumDuration(d1 time.Duration, d2 time.Duration) time.Duration { + return d1 + d2 +} + +func maxDuration(d1 time.Duration, d2 time.Duration) time.Duration { + if d1 > d2 { + return d1 + } + return d2 +} + +// DurationTracker is a simple interface for tracking functions duration +type DurationTracker interface { + Track(func()) + GetLatency() time.Duration +} + +// durationTracker implements DurationTracker by measuring function time +// using given clock and aggregates the duration using given aggregate function +type durationTracker struct { + clock clock.Clock + latency time.Duration + mu sync.Mutex + aggregateFunction func(time.Duration, time.Duration) time.Duration +} + +// Track measures time spent in given function and aggregates measured +// duration using aggregateFunction +func (t *durationTracker) Track(f func()) { + startedAt := t.clock.Now() + defer func() { + duration := t.clock.Since(startedAt) + t.mu.Lock() + defer t.mu.Unlock() + t.latency = t.aggregateFunction(t.latency, duration) + }() + + f() +} + +// GetLatency returns aggregated latency tracked by a tracker +func (t *durationTracker) GetLatency() time.Duration { + t.mu.Lock() + defer t.mu.Unlock() + return t.latency +} + +func newSumLatencyTracker(c clock.Clock) DurationTracker { + return &durationTracker{ + clock: c, + aggregateFunction: sumDuration, + } +} + +func newMaxLatencyTracker(c clock.Clock) DurationTracker { + return &durationTracker{ + clock: c, + aggregateFunction: maxDuration, + } +} + +// WebhookDuration stores trackers used to measure webhook request durations. +// Since admit webhooks are done sequentially duration is aggregated using +// sum function. Validate webhooks are done in parallel so max function +// is used. +type WebhookDuration struct { + AdmitTracker DurationTracker + ValidateTracker DurationTracker +} + +type webhookDurationKeyType int + +// webhookDurationKey is the WebhookDuration (the time the request spent waiting +// for the webhooks to finish) key for the context. +const webhookDurationKey webhookDurationKeyType = iota + +// WithWebhookDuration returns a copy of parent context to which the +// WebhookDuration trackers are added. 
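The durationTracker added above folds each measured call into a running value through a pluggable aggregate: sum for sequential admit webhooks, max for parallel validating webhooks. A condensed stand-alone variant of the same idea, using only the standard library rather than the apiserver's clock abstraction:

```go
package main

import (
	"fmt"
	"time"
)

// tracker mirrors the DurationTracker idea above: measure each call and fold
// the measurement into a running latency with an aggregate function.
type tracker struct {
	latency   time.Duration
	aggregate func(a, b time.Duration) time.Duration
}

func (t *tracker) Track(f func()) {
	start := time.Now()
	f()
	t.latency = t.aggregate(t.latency, time.Since(start))
}

func main() {
	sum := func(a, b time.Duration) time.Duration { return a + b }
	max := func(a, b time.Duration) time.Duration {
		if a > b {
			return a
		}
		return b
	}
	// Mutating webhooks run sequentially, so their cost adds up; validating
	// webhooks run in parallel, so only the slowest one matters.
	admit := &tracker{aggregate: sum}
	validate := &tracker{aggregate: max}
	admit.Track(func() { time.Sleep(10 * time.Millisecond) })
	validate.Track(func() { time.Sleep(5 * time.Millisecond) })
	fmt.Println(admit.latency > 0, validate.latency > 0) // true true
}
```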
+func WithWebhookDuration(parent context.Context) context.Context { + return WithWebhookDurationAndCustomClock(parent, clock.RealClock{}) +} + +// WithWebhookDurationAndCustomClock returns a copy of parent context to which +// the WebhookDuration trackers are added. Tracers use given clock. +func WithWebhookDurationAndCustomClock(parent context.Context, c clock.Clock) context.Context { + return WithValue(parent, webhookDurationKey, &WebhookDuration{ + AdmitTracker: newSumLatencyTracker(c), + ValidateTracker: newMaxLatencyTracker(c), + }) +} + +// WebhookDurationFrom returns the value of the WebhookDuration key from the specified context. +func WebhookDurationFrom(ctx context.Context) (*WebhookDuration, bool) { + wd, ok := ctx.Value(webhookDurationKey).(*WebhookDuration) + return wd, ok && wd != nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/fake.go b/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/fake.go new file mode 100644 index 000000000..3a8fe7a6a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package responsewriter + +import ( + "bufio" + "net" + "net/http" +) + +var _ http.ResponseWriter = &FakeResponseWriter{} + +// FakeResponseWriter implements http.ResponseWriter, +// it is used for testing purpose only +type FakeResponseWriter struct{} + +func (fw *FakeResponseWriter) Header() http.Header { return http.Header{} } +func (fw *FakeResponseWriter) WriteHeader(code int) {} +func (fw *FakeResponseWriter) Write(bs []byte) (int, error) { return len(bs), nil } + +// For HTTP2 an http.ResponseWriter object implements +// http.Flusher and http.CloseNotifier. +// It is used for testing purpose only +type FakeResponseWriterFlusherCloseNotifier struct { + *FakeResponseWriter +} + +func (fw *FakeResponseWriterFlusherCloseNotifier) Flush() {} +func (fw *FakeResponseWriterFlusherCloseNotifier) CloseNotify() <-chan bool { return nil } + +// For HTTP/1.x an http.ResponseWriter object implements +// http.Flusher, http.CloseNotifier and http.Hijacker. +// It is used for testing purpose only +type FakeResponseWriterFlusherCloseNotifierHijacker struct { + *FakeResponseWriterFlusherCloseNotifier +} + +func (fw *FakeResponseWriterFlusherCloseNotifierHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return nil, nil, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go b/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go new file mode 100644 index 000000000..758e7addd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go @@ -0,0 +1,180 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package responsewriter + +import ( + "bufio" + "net" + "net/http" +) + +// UserProvidedDecorator represensts a user (client that uses this package) +// provided decorator that wraps an inner http.ResponseWriter object. +// The user-provided decorator object must return the inner (decorated) +// http.ResponseWriter object via the Unwrap function. +type UserProvidedDecorator interface { + http.ResponseWriter + + // Unwrap returns the inner http.ResponseWriter object associated + // with the user-provided decorator. + Unwrap() http.ResponseWriter +} + +// WrapForHTTP1Or2 accepts a user-provided decorator of an "inner" http.responseWriter +// object and potentially wraps the user-provided decorator with a new http.ResponseWriter +// object that implements http.CloseNotifier, http.Flusher, and/or http.Hijacker by +// delegating to the user-provided decorator (if it implements the relevant method) or +// the inner http.ResponseWriter (otherwise), so that the returned http.ResponseWriter +// object implements the same subset of those interfaces as the inner http.ResponseWriter. +// +// This function handles the following three casses. +// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`, +// and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter). +// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher` +// but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter). +// - All the other cases collapse to this one, in which the given ResponseWriter is returned. +// +// There are three applicable terms: +// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function. +// - "user-provided decorator" or "middle": this is the user-provided decorator +// that decorates an inner ResponseWriter object. A user-provided decorator +// implements the UserProvidedDecorator interface. A user-provided decorator +// may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker. +// - "inner": the ResponseWriter that the user-provided decorator extends. +func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter { + // from go net/http documentation: + // The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher + // Handlers should always test for this ability at runtime. + // + // The Hijacker interface is implemented by ResponseWriters that allow an HTTP handler + // to take over the connection. + // The default ResponseWriter for HTTP/1.x connections supports Hijacker, but HTTP/2 connections + // intentionally do not. ResponseWriter wrappers may also not support Hijacker. + // Handlers should always test for this ability at runtime + // + // The CloseNotifier interface is implemented by ResponseWriters which allow detecting + // when the underlying connection has gone away. + // Deprecated: the CloseNotifier interface predates Go's context package. + // New code should use Request.Context instead. 
+ inner := decorator.Unwrap() + if innerNotifierFlusher, ok := inner.(CloseNotifierFlusher); ok { + // for HTTP/2 request, the default ResponseWriter object (http2responseWriter) + // implements Flusher and CloseNotifier. + outerHTTP2 := outerWithCloseNotifyAndFlush{ + UserProvidedDecorator: decorator, + InnerCloseNotifierFlusher: innerNotifierFlusher, + } + + if innerHijacker, hijackable := inner.(http.Hijacker); hijackable { + // for HTTP/1.x request the default implementation of ResponseWriter + // also implement CloseNotifier, Flusher and Hijacker + return &outerWithCloseNotifyFlushAndHijack{ + outerWithCloseNotifyAndFlush: outerHTTP2, + InnerHijacker: innerHijacker, + } + } + + return outerHTTP2 + } + + // we should never be here for either http/1.x or http2 request + return decorator +} + +// CloseNotifierFlusher is a combination of http.CloseNotifier and http.Flusher +// This applies to both http/1.x and http2 requests. +type CloseNotifierFlusher interface { + http.CloseNotifier + http.Flusher +} + +// GetOriginal goes through the chain of wrapped http.ResponseWriter objects +// and returns the original http.ResponseWriter object provided to the first +// request handler in the filter chain. +func GetOriginal(w http.ResponseWriter) http.ResponseWriter { + decorator, ok := w.(UserProvidedDecorator) + if !ok { + return w + } + + inner := decorator.Unwrap() + if inner == w { + // infinite cycle here, we should never be here though. + panic("http.ResponseWriter decorator chain has a cycle") + } + + return GetOriginal(inner) +} + +//nolint:staticcheck // SA1019 +var _ http.CloseNotifier = outerWithCloseNotifyAndFlush{} +var _ http.Flusher = outerWithCloseNotifyAndFlush{} +var _ http.ResponseWriter = outerWithCloseNotifyAndFlush{} +var _ UserProvidedDecorator = outerWithCloseNotifyAndFlush{} + +// outerWithCloseNotifyAndFlush is the outer object that extends the +// user provied decorator with http.CloseNotifier and http.Flusher only. +type outerWithCloseNotifyAndFlush struct { + // UserProvidedDecorator is the user-provided object, it decorates + // an inner ResponseWriter object. + UserProvidedDecorator + + // http.CloseNotifier and http.Flusher for the inner object + InnerCloseNotifierFlusher CloseNotifierFlusher +} + +func (wr outerWithCloseNotifyAndFlush) CloseNotify() <-chan bool { + if notifier, ok := wr.UserProvidedDecorator.(http.CloseNotifier); ok { + return notifier.CloseNotify() + } + + return wr.InnerCloseNotifierFlusher.CloseNotify() +} + +func (wr outerWithCloseNotifyAndFlush) Flush() { + if flusher, ok := wr.UserProvidedDecorator.(http.Flusher); ok { + flusher.Flush() + return + } + + wr.InnerCloseNotifierFlusher.Flush() +} + +//lint:file-ignore SA1019 Keep supporting deprecated http.CloseNotifier +var _ http.CloseNotifier = outerWithCloseNotifyFlushAndHijack{} +var _ http.Flusher = outerWithCloseNotifyFlushAndHijack{} +var _ http.Hijacker = outerWithCloseNotifyFlushAndHijack{} +var _ http.ResponseWriter = outerWithCloseNotifyFlushAndHijack{} +var _ UserProvidedDecorator = outerWithCloseNotifyFlushAndHijack{} + +// outerWithCloseNotifyFlushAndHijack is the outer object that extends the +// user-provided decorator with http.CloseNotifier, http.Flusher and http.Hijacker. +// This applies to http/1.x requests only. 
+type outerWithCloseNotifyFlushAndHijack struct { + outerWithCloseNotifyAndFlush + + // http.Hijacker for the inner object + InnerHijacker http.Hijacker +} + +func (wr outerWithCloseNotifyFlushAndHijack) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hijacker, ok := wr.UserProvidedDecorator.(http.Hijacker); ok { + return hijacker.Hijack() + } + + return wr.InnerHijacker.Hijack() +} diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 645e67e7e..ffab26970 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -170,6 +170,35 @@ const ( // // Add support for distributed tracing in the API Server APIServerTracing featuregate.Feature = "APIServerTracing" + + // owner: @jiahuif + // kep: http://kep.k8s.io/2887 + // alpha: v1.23 + // + // Enables populating "enum" field of OpenAPI schemas + // in the spec returned from kube-apiserver. + OpenAPIEnums featuregate.Feature = "OpenAPIEnums" + + // owner: @cici37 + // kep: http://kep.k8s.io/2876 + // alpha: v1.23 + // + // Enables expression validation for Custom Resource + CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" + + // owner: @jefftree + // kep: http://kep.k8s.io/2896 + // alpha: v1.23 + // + // Enables kubernetes to publish OpenAPI v3 + OpenAPIV3 featuregate.Feature = "OpenAPIV3" + + // owner: @kevindelgado + // kep: http://kep.k8s.io/2885 + // alpha: v1.23 + // + // Enables server-side field validation. + ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation" ) func init() { @@ -180,22 +209,26 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. 
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - StreamingProxyRedirects: {Default: false, PreRelease: featuregate.Deprecated}, - ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated}, - AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - APIListChunking: {Default: true, PreRelease: featuregate.Beta}, - DryRun: {Default: true, PreRelease: featuregate.GA}, - RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, - ServerSideApply: {Default: true, PreRelease: featuregate.GA}, - StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, - WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, - RemoveSelfLink: {Default: true, PreRelease: featuregate.Beta}, - SelectorIndex: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - WarningHeaders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - EfficientWatchResumption: {Default: true, PreRelease: featuregate.Beta}, - APIServerIdentity: {Default: false, PreRelease: featuregate.Alpha}, - APIServerTracing: {Default: false, PreRelease: featuregate.Alpha}, + StreamingProxyRedirects: {Default: false, PreRelease: featuregate.Deprecated}, + ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated}, + AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, + APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, + APIListChunking: {Default: true, PreRelease: featuregate.Beta}, + DryRun: {Default: true, PreRelease: featuregate.GA}, + RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.GA}, + StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, + StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, + RemoveSelfLink: {Default: true, PreRelease: featuregate.Beta}, + SelectorIndex: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WarningHeaders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + EfficientWatchResumption: {Default: true, PreRelease: featuregate.Beta}, + APIServerIdentity: {Default: false, PreRelease: featuregate.Alpha}, + APIServerTracing: {Default: false, PreRelease: featuregate.Alpha}, + OpenAPIEnums: {Default: false, PreRelease: featuregate.Alpha}, + CustomResourceValidationExpressions: {Default: false, PreRelease: featuregate.Alpha}, + OpenAPIV3: {Default: false, PreRelease: featuregate.Alpha}, + ServerSideFieldValidation: {Default: false, PreRelease: featuregate.Alpha}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go index 577192b62..d675a258f 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/options.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/options.go @@ -22,18 +22,20 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" + flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/client-go/tools/cache" ) -// RESTOptions is set of configuration options to 
generic registries. +// RESTOptions is set of resource-specific configuration options to generic registries. type RESTOptions struct { - StorageConfig *storagebackend.Config + StorageConfig *storagebackend.ConfigForResource Decorator StorageDecorator - EnableGarbageCollection bool - DeleteCollectionWorkers int - ResourcePrefix string - CountMetricPollPeriod time.Duration + EnableGarbageCollection bool + DeleteCollectionWorkers int + ResourcePrefix string + CountMetricPollPeriod time.Duration + StorageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker } // Implement RESTOptionsGetter so that RESTOptions can directly be used when available (i.e. tests) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index f2fa59723..6a4426ee6 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -36,7 +36,7 @@ import ( // Creates a cacher based given storageConfig. func StorageWithCacher() generic.StorageDecorator { return func( - storageConfig *storagebackend.Config, + storageConfig *storagebackend.ConfigForResource, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), newFunc func() runtime.Object, diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 7a626855a..4397f631e 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -44,6 +44,7 @@ import ( storeerr "k8s.io/apiserver/pkg/storage/errors" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/util/dryrun" + flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/client-go/tools/cache" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -292,7 +293,7 @@ func (e *Store) NamespaceScoped() bool { return e.UpdateStrategy.NamespaceScoped() } - panic("programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too") + panic("programmer error: no CRUD for resource, override NamespaceScoped too") } // GetCreateStrategy implements GenericStore. @@ -359,6 +360,9 @@ func (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, func finishNothing(context.Context, bool) {} // Create inserts a new item according to the unique key from the object. +// Note that registries may mutate the input object (e.g. in the strategy +// hooks). Tests which call this might want to call DeepCopy if they expect to +// be able to examine the input and output objects for differences. 
func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { var finishCreate FinishFunc = finishNothing @@ -400,7 +404,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation out := e.NewFunc() if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil { err = storeerr.InterpretCreateError(err, qualifiedResource, name) - err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj) + err = rest.CheckGeneratedNameError(ctx, e.CreateStrategy, err, obj) if !apierrors.IsAlreadyExists(err) { return nil, err } @@ -655,7 +659,7 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj } if creating { err = storeerr.InterpretCreateError(err, qualifiedResource, name) - err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj) + err = rest.CheckGeneratedNameError(ctx, e.CreateStrategy, err, creatingObj) } else { err = storeerr.InterpretUpdateError(err, qualifiedResource, name) } @@ -681,8 +685,9 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj // create-on-update path. func newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions { co := &metav1.CreateOptions{ - DryRun: in.DryRun, - FieldManager: in.FieldManager, + DryRun: in.DryRun, + FieldManager: in.FieldManager, + FieldValidation: in.FieldValidation, } co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions")) return co @@ -749,9 +754,9 @@ func shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Objec } // An explicit policy was set at deletion time, that overrides everything - //lint:ignore SA1019 backwards compatibility + //nolint:staticcheck // SA1019 backwards compatibility if options != nil && options.OrphanDependents != nil { - //lint:ignore SA1019 backwards compatibility + //nolint:staticcheck // SA1019 backwards compatibility return *options.OrphanDependents } if options != nil && options.PropagationPolicy != nil { @@ -792,7 +797,7 @@ func shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Objec } // If an explicit policy was set at deletion time, that overrides both - //lint:ignore SA1019 backwards compatibility + //nolint:staticcheck // SA1019 backwards compatibility if options != nil && options.OrphanDependents != nil { return false } @@ -1123,13 +1128,21 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali wg := sync.WaitGroup{} toProcess := make(chan int, 2*workersNumber) errs := make(chan error, workersNumber+1) + workersExited := make(chan struct{}) + distributorExited := make(chan struct{}) go func() { defer utilruntime.HandleCrash(func(panicReason interface{}) { errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason) }) + defer close(distributorExited) for i := 0; i < len(items); i++ { - toProcess <- i + select { + case toProcess <- i: + case <-workersExited: + klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "finished", i, "total", len(items)) + return + } } close(toProcess) }() @@ -1162,6 +1175,9 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali }() } wg.Wait() + // notify distributor to exit + close(workersExited) + <-distributorExited select { case err := <-errs: return nil, err @@ -1413,7 +1429,7 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) 
error { e.StorageVersioner = opts.StorageConfig.EncodeVersioner if opts.CountMetricPollPeriod > 0 { - stopFunc := e.startObservingCount(opts.CountMetricPollPeriod) + stopFunc := e.startObservingCount(opts.CountMetricPollPeriod, opts.StorageObjectCountTracker) previousDestroy := e.DestroyFunc e.DestroyFunc = func() { stopFunc() @@ -1428,7 +1444,7 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { } // startObservingCount starts monitoring given prefix and periodically updating metrics. It returns a function to stop collection. -func (e *Store) startObservingCount(period time.Duration) func() { +func (e *Store) startObservingCount(period time.Duration, objectCountTracker flowcontrolrequest.StorageObjectCountTracker) func() { prefix := e.KeyRootFunc(genericapirequest.NewContext()) resourceName := e.DefaultQualifiedResource.String() klog.V(2).InfoS("Monitoring resource count at path", "resource", resourceName, "path", "/"+prefix) @@ -1437,9 +1453,12 @@ func (e *Store) startObservingCount(period time.Duration) func() { count, err := e.Storage.Count(prefix) if err != nil { klog.V(5).InfoS("Failed to update storage count metric", "err", err) - metrics.UpdateObjectCount(resourceName, -1) - } else { - metrics.UpdateObjectCount(resourceName, count) + count = -1 + } + + metrics.UpdateObjectCount(resourceName, count) + if objectCountTracker != nil { + objectCountTracker.Set(resourceName, count) } }, period, resourceCountPollPeriodJitter, true, stopCh) return func() { close(stopCh) } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index e0ca2df04..715aa1047 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -27,7 +27,7 @@ import ( // StorageDecorator is a function signature for producing a storage.Interface // and an associated DestroyFunc from given parameters. type StorageDecorator func( - config *storagebackend.Config, + config *storagebackend.ConfigForResource, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), newFunc func() runtime.Object, @@ -39,7 +39,7 @@ type StorageDecorator func( // UndecoratedStorage returns the given a new storage from the given config // without any decoration. func UndecoratedStorage( - config *storagebackend.Config, + config *storagebackend.ConfigForResource, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), newFunc func() runtime.Object, @@ -53,6 +53,6 @@ func UndecoratedStorage( // NewRawStorage creates the low level kv storage. This is a work-around for current // two layer of same storage interface. // TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method. 
-func NewRawStorage(config *storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { +func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { return factory.Create(*config, newFunc) } diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go index 950260e45..942ee4774 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage/names" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -109,6 +110,7 @@ func BeforeCreate(strategy RESTCreateStrategy, ctx context.Context, obj runtime. objectMeta.SetDeletionGracePeriodSeconds(nil) strategy.PrepareForCreate(ctx, obj) FillObjectMetaSystemFields(objectMeta) + if len(objectMeta.GetGenerateName()) > 0 && len(objectMeta.GetName()) == 0 { objectMeta.SetName(strategy.GenerateName(objectMeta.GetGenerateName())) } @@ -145,21 +147,31 @@ func BeforeCreate(strategy RESTCreateStrategy, ctx context.Context, obj runtime. // CheckGeneratedNameError checks whether an error that occurred creating a resource is due // to generation being unable to pick a valid name. -func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { +func CheckGeneratedNameError(ctx context.Context, strategy RESTCreateStrategy, err error, obj runtime.Object) error { if !errors.IsAlreadyExists(err) { return err } - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) if kerr != nil { return kerr } if len(objectMeta.GetGenerateName()) == 0 { + // If we don't have a generated name, return the original error (AlreadyExists). + // When we're here, the user picked a name that is causing a conflict. return err } - return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) + // Get the group resource information from the context, if populated. + gr := schema.GroupResource{} + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + gr = schema.GroupResource{Group: gvk.Group, Resource: requestInfo.Resource} + } + + // If we have a name and generated name, the server picked a name + // that already exists. + return errors.NewGenerateNameConflict(gr, objectMeta.GetName(), 1) } // objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. 
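As a minimal sketch (not part of the vendored diff) of the error the new create path produces when a server-generated name collides, using only the APIs referenced in the hunk above; the group/resource and object name below are illustrative assumptions:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Previously the store surfaced a ServerTimeout for the kind; with the
	// change above it reports a generate-name conflict scoped to the
	// group/resource taken from the request info.
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"} // illustrative values
	err := errors.NewGenerateNameConflict(gr, "example-abcde", 1)
	fmt.Println(err) // a conflict error; callers may retry the create so a new name is generated
}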
diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index c4bbe1511..fb3179c46 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -35,13 +35,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" - auditpolicy "k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union" @@ -74,6 +72,7 @@ import ( "k8s.io/klog/v2" openapicommon "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/validation/spec" + "k8s.io/utils/clock" utilsnet "k8s.io/utils/net" // install apis @@ -135,8 +134,8 @@ type Config struct { Version *version.Info // AuditBackend is where audit events are sent to. AuditBackend audit.Backend - // AuditPolicyChecker makes the decision of whether and how to audit log a request. - AuditPolicyChecker auditpolicy.Checker + // AuditPolicyRuleEvaluator makes the decision of whether and how to audit log a request. + AuditPolicyRuleEvaluator audit.PolicyRuleEvaluator // ExternalAddress is the host name to use for external (public internet) facing URLs (e.g. Swagger) // Will default to a value based on secure serving info and available ipv4 IPs. ExternalAddress string @@ -222,14 +221,24 @@ type Config struct { // If not specify any in flags, then genericapiserver will only enable defaultAPIResourceConfig. MergedResourceConfig *serverstore.ResourceConfig - // RequestWidthEstimator is used to estimate the "width" of the incoming request(s). - RequestWidthEstimator flowcontrolrequest.WidthEstimatorFunc - // lifecycleSignals provides access to the various signals // that happen during lifecycle of the apiserver. // it's intentionally marked private as it should never be overridden. lifecycleSignals lifecycleSignals + // StorageObjectCountTracker is used to keep track of the total number of objects + // in the storage per resource, so we can estimate width of incoming requests. + StorageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker + + // ShutdownSendRetryAfter dictates when to initiate shutdown of the HTTP + // Server during the graceful termination of the apiserver. If true, we wait + // for non longrunning requests in flight to be drained and then initiate a + // shutdown of the HTTP Server. If false, we initiate a shutdown of the HTTP + // Server as soon as ShutdownDelayDuration has elapsed. + // If enabled, after ShutdownDelayDuration elapses, any incoming request is + // rejected with a 429 status code and a 'Retry-After' response. 
+ ShutdownSendRetryAfter bool + //=========================================================================== // values below here are targets for removal //=========================================================================== @@ -315,6 +324,8 @@ func NewConfig(codecs serializer.CodecFactory) *Config { if feature.DefaultFeatureGate.Enabled(features.APIServerIdentity) { id = "kube-apiserver-" + uuid.New().String() } + lifecycleSignals := newLifecycleSignals() + return &Config{ Serializer: codecs, BuildHandlerChainFunc: DefaultBuildHandlerChain, @@ -352,9 +363,9 @@ func NewConfig(codecs serializer.CodecFactory) *Config { // Default to treating watch as a long-running operation // Generic API servers have no inherent long-running subresources - LongRunningFunc: genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()), - RequestWidthEstimator: flowcontrolrequest.DefaultWidthEstimator, - lifecycleSignals: newLifecycleSignals(), + LongRunningFunc: genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()), + lifecycleSignals: lifecycleSignals, + StorageObjectCountTracker: flowcontrolrequest.NewStorageObjectCountTracker(lifecycleSignals.ShutdownInitiated.Signaled()), APIServerID: id, StorageVersionManager: storageversion.NewDefaultManager(), @@ -567,6 +578,7 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G handlerChainBuilder := func(handler http.Handler) http.Handler { return c.BuildHandlerChainFunc(handler, c.Config) } + apiServerHandler := NewAPIServerHandler(name, c.Serializer, handlerChainBuilder, delegationTarget.UnprotectedHandler()) s := &GenericAPIServer{ @@ -580,6 +592,9 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G delegationTarget: delegationTarget, EquivalentResourceRegistry: c.EquivalentResourceRegistry, HandlerChainWaitGroup: c.HandlerChainWaitGroup, + Handler: apiServerHandler, + + listedPathProvider: apiServerHandler, minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second, ShutdownTimeout: c.RequestTimeout, @@ -587,10 +602,6 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G SecureServingInfo: c.SecureServing, ExternalAddress: c.ExternalAddress, - Handler: apiServerHandler, - - listedPathProvider: apiServerHandler, - openAPIConfig: c.OpenAPIConfig, skipOpenAPIInstallation: c.SkipOpenAPIInstallation, @@ -608,12 +619,15 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G maxRequestBodyBytes: c.MaxRequestBodyBytes, livezClock: clock.RealClock{}, - lifecycleSignals: c.lifecycleSignals, + lifecycleSignals: c.lifecycleSignals, + ShutdownSendRetryAfter: c.ShutdownSendRetryAfter, APIServerID: c.APIServerID, StorageVersionManager: c.StorageVersionManager, Version: c.Version, + + muxAndDiscoveryCompleteSignals: map[string]<-chan struct{}{}, } for { @@ -645,6 +659,13 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G } } + // register mux signals from the delegated server + for k, v := range delegationTarget.MuxAndDiscoveryCompleteSignals() { + if err := s.RegisterMuxAndDiscoveryCompleteSignal(k, v); err != nil { + return nil, err + } + } + genericApiServerHookName := "generic-apiserver-start-informers" if c.SharedInformerFactory != nil { if !s.isPostStartHookRegistered(genericApiServerHookName) { @@ -741,13 +762,15 @@ func BuildHandlerChainWithStorageVersionPrecondition(apiHandler http.Handler, c } func DefaultBuildHandlerChain(apiHandler 
http.Handler, c *Config) http.Handler { - handler := filterlatency.TrackCompleted(apiHandler) + handler := genericapifilters.WithWebhookDuration(apiHandler) + handler = filterlatency.TrackCompleted(handler) handler = genericapifilters.WithAuthorization(handler, c.Authorization.Authorizer, c.Serializer) handler = filterlatency.TrackStarted(handler, "authorization") if c.FlowControl != nil { + requestWorkEstimator := flowcontrolrequest.NewWorkEstimator(c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount) handler = filterlatency.TrackCompleted(handler) - handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, c.RequestWidthEstimator) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator) handler = filterlatency.TrackStarted(handler, "priorityandfairness") } else { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) @@ -758,11 +781,11 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = filterlatency.TrackStarted(handler, "impersonation") handler = filterlatency.TrackCompleted(handler) - handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) + handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator, c.LongRunningFunc) handler = filterlatency.TrackStarted(handler, "audit") failedHandler := genericapifilters.Unauthorized(c.Serializer) - failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyChecker) + failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyRuleEvaluator) failedHandler = filterlatency.TrackCompleted(failedHandler) handler = filterlatency.TrackCompleted(handler) @@ -775,22 +798,26 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { // context with deadline. The go-routine can keep running, while the timeout logic will return a timeout to the client. 
handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc) - handler = genericapifilters.WithRequestDeadline(handler, c.AuditBackend, c.AuditPolicyChecker, + handler = genericapifilters.WithRequestDeadline(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator, c.LongRunningFunc, c.Serializer, c.RequestTimeout) handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup) if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 { handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance) } - handler = genericapifilters.WithAuditAnnotations(handler, c.AuditBackend, c.AuditPolicyChecker) + handler = genericapifilters.WithAuditAnnotations(handler, c.AuditBackend, c.AuditPolicyRuleEvaluator) handler = genericapifilters.WithWarningRecorder(handler) handler = genericapifilters.WithCacheControl(handler) handler = genericfilters.WithHSTS(handler, c.HSTSDirectives) + if c.ShutdownSendRetryAfter { + handler = genericfilters.WithRetryAfter(handler, c.lifecycleSignals.AfterShutdownDelayDuration.Signaled()) + } handler = genericfilters.WithHTTPLogging(handler) if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { handler = genericapifilters.WithTracing(handler, c.TracerProvider) } handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) handler = genericapifilters.WithRequestReceivedTimestamp(handler) + handler = genericapifilters.WithMuxAndDiscoveryComplete(handler, c.lifecycleSignals.MuxAndDiscoveryComplete.Signaled()) handler = genericfilters.WithPanicRecovery(handler, c.RequestInfoResolver) handler = genericapifilters.WithAuditID(handler) return handler diff --git a/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go b/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go index f9735a839..ab10774cb 100644 --- a/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go +++ b/vendor/k8s.io/apiserver/pkg/server/deleted_kinds.go @@ -54,10 +54,7 @@ type ResourceExpirationEvaluator interface { } func NewResourceExpirationEvaluator(currentVersion apimachineryversion.Info) (ResourceExpirationEvaluator, error) { - ret := &resourceExpirationEvaluator{ - // TODO https://github.com/kubernetes/kubernetes/issues/101951 set this back to false after beta is tagged. 
- strictRemovedHandlingInAlpha: true, - } + ret := &resourceExpirationEvaluator{} if len(currentVersion.Major) > 0 { currentMajor64, err := strconv.ParseInt(currentVersion.Major, 10, 32) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go index 1de20682a..04375d1dd 100644 --- a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -45,6 +45,9 @@ func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTime Addr: s.Listener.Addr().String(), Handler: handler, MaxHeaderBytes: 1 << 20, + + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, // just shy of requestTimeoutUpperBound } if len(s.Name) > 0 { diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 15f3c1dad..fb1515c18 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + "github.com/fsnotify/fsnotify" "k8s.io/client-go/util/cert" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -44,7 +45,7 @@ type ControllerRunner interface { Run(workers int, stopCh <-chan struct{}) } -// DynamicFileCAContent provies a CAContentProvider that can dynamically react to new file content +// DynamicFileCAContent provides a CAContentProvider that can dynamically react to new file content // It also fulfills the authenticator interface to provide verifyoptions type DynamicFileCAContent struct { name string @@ -147,7 +148,7 @@ func (c *DynamicFileCAContent) RunOnce() error { return c.loadCABundle() } -// Run starts the kube-apiserver and blocks until stopCh is closed. +// Run starts the controller and blocks until stopCh is closed. func (c *DynamicFileCAContent) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() @@ -158,17 +159,62 @@ func (c *DynamicFileCAContent) Run(workers int, stopCh <-chan struct{}) { // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) - // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. - go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { - c.queue.Add(workItemKey) - return false, nil - }, stopCh) - - // TODO this can be wired to an fsnotifier as well. + // start the loop that watches the CA file until stopCh is closed. + go wait.Until(func() { + if err := c.watchCAFile(stopCh); err != nil { + klog.ErrorS(err, "Failed to watch CA file, will retry later") + } + }, time.Minute, stopCh) <-stopCh } +func (c *DynamicFileCAContent) watchCAFile(stopCh <-chan struct{}) error { + // Trigger a check here to ensure the content will be checked periodically even if the following watch fails. + c.queue.Add(workItemKey) + + w, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating fsnotify watcher: %v", err) + } + defer w.Close() + + if err = w.Add(c.filename); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.filename, err) + } + // Trigger a check in case the file is updated before the watch starts. 
+ c.queue.Add(workItemKey) + + for { + select { + case e := <-w.Events: + if err := c.handleWatchEvent(e, w); err != nil { + return err + } + case err := <-w.Errors: + return fmt.Errorf("received fsnotify error: %v", err) + case <-stopCh: + return nil + } + } +} + +// handleWatchEvent triggers reloading the CA file, and restarts a new watch if it's a Remove or Rename event. +func (c *DynamicFileCAContent) handleWatchEvent(e fsnotify.Event, w *fsnotify.Watcher) error { + // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. + defer c.queue.Add(workItemKey) + if e.Op&(fsnotify.Remove|fsnotify.Rename) == 0 { + return nil + } + if err := w.Remove(c.filename); err != nil { + klog.InfoS("Failed to remove file watch, it may have been deleted", "file", c.filename, "err", err) + } + if err := w.Add(c.filename); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.filename, err) + } + return nil +} + func (c *DynamicFileCAContent) runWorker() { for c.processNextWorkItem() { } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go index de79fb58f..00117176b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -23,6 +23,8 @@ import ( "sync/atomic" "time" + "github.com/fsnotify/fsnotify" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" @@ -38,7 +40,7 @@ type DynamicCertKeyPairContent struct { // keyFile is the name of the key file to read. keyFile string - // servingCert is a certKeyContent that contains the last read, non-zero length content of the key and cert + // certKeyPair is a certKeyContent that contains the last read, non-zero length content of the key and cert certKeyPair atomic.Value listeners []Listener @@ -75,7 +77,7 @@ func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { c.listeners = append(c.listeners, listener) } -// loadServingCert determines the next set of content for the file. +// loadCertKeyPair determines the next set of content for the file. func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { cert, err := ioutil.ReadFile(c.certFile) if err != nil { @@ -132,17 +134,68 @@ func (c *DynamicCertKeyPairContent) Run(workers int, stopCh <-chan struct{}) { // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) - // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly. - go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) { - c.queue.Add(workItemKey) - return false, nil - }, stopCh) - - // TODO this can be wired to an fsnotifier as well. + // start the loop that watches the cert and key files until stopCh is closed. + go wait.Until(func() { + if err := c.watchCertKeyFile(stopCh); err != nil { + klog.ErrorS(err, "Failed to watch cert and key file, will retry later") + } + }, time.Minute, stopCh) <-stopCh } +func (c *DynamicCertKeyPairContent) watchCertKeyFile(stopCh <-chan struct{}) error { + // Trigger a check here to ensure the content will be checked periodically even if the following watch fails. 
+ c.queue.Add(workItemKey) + + w, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating fsnotify watcher: %v", err) + } + defer w.Close() + + if err := w.Add(c.certFile); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.certFile, err) + } + if err := w.Add(c.keyFile); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.keyFile, err) + } + // Trigger a check in case the file is updated before the watch starts. + c.queue.Add(workItemKey) + + for { + select { + case e := <-w.Events: + if err := c.handleWatchEvent(e, w); err != nil { + return err + } + case err := <-w.Errors: + return fmt.Errorf("received fsnotify error: %v", err) + case <-stopCh: + return nil + } + } +} + +// handleWatchEvent triggers reloading the cert and key file, and restarts a new watch if it's a Remove or Rename event. +// If one file is updated before the other, the loadCertKeyPair method will catch the mismatch and will not apply the +// change. When an event of the other file is received, it will trigger reloading the files again and the new content +// will be loaded and used. +func (c *DynamicCertKeyPairContent) handleWatchEvent(e fsnotify.Event, w *fsnotify.Watcher) error { + // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. + defer c.queue.Add(workItemKey) + if e.Op&(fsnotify.Remove|fsnotify.Rename) == 0 { + return nil + } + if err := w.Remove(e.Name); err != nil { + klog.InfoS("Failed to remove file watch, it may have been deleted", "file", e.Name, "err", err) + } + if err := w.Add(e.Name); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", e.Name, err) + } + return nil +} + func (c *DynamicCertKeyPairContent) runWorker() { for c.processNextWorkItem() { } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go index ee0aa8de0..e8be133c0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go @@ -20,12 +20,12 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "net" "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/klog/v2" + netutils "k8s.io/utils/net" ) // BuildNamedCertificates returns a map of *tls.Certificate by name. It's @@ -77,7 +77,7 @@ func getCertificateNames(cert *x509.Certificate) []string { var names []string cn := cert.Subject.CommonName - cnIsIP := net.ParseIP(cn) != nil + cnIsIP := netutils.ParseIPSloppy(cn) != nil cnIsValidDomain := cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 // don't use the CN if it is a valid IP because our IP serving detection may unexpectedly use it to terminate the connection. if !cnIsIP && cnIsValidDomain { diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index b7e378e43..2df786e13 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -34,8 +34,7 @@ import ( var cfgScheme = runtime.NewScheme() // validEgressSelectorNames contains the set of valid egress selctor names. -// 'master' is deprecated in favor of 'controlplane' and will be removed in v1.22. 
-var validEgressSelectorNames = sets.NewString("master", "controlplane", "cluster", "etcd") +var validEgressSelectorNames = sets.NewString("controlplane", "cluster", "etcd") func init() { install.Install(cfgScheme) @@ -103,27 +102,21 @@ func ValidateEgressSelectorConfiguration(config *apiserver.EgressSelectorConfigu } } - var foundControlPlane, foundMaster bool - for _, service := range config.EgressSelections { + seen := sets.String{} + for i, service := range config.EgressSelections { canonicalName := strings.ToLower(service.Name) - - if !validEgressSelectorNames.Has(canonicalName) { - allErrs = append(allErrs, field.NotSupported(field.NewPath("egressSelection", "name"), canonicalName, validEgressSelectorNames.List())) + fldPath := field.NewPath("service", "connection") + // no duplicate check + if seen.Has(canonicalName) { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), canonicalName)) continue } + seen.Insert(canonicalName) - if canonicalName == "master" { - foundMaster = true + if !validEgressSelectorNames.Has(canonicalName) { + allErrs = append(allErrs, field.NotSupported(fldPath, canonicalName, validEgressSelectorNames.List())) + continue } - - if canonicalName == "controlplane" { - foundControlPlane = true - } - } - - // error if both master and controlplane egress selectors are set - if foundMaster && foundControlPlane { - allErrs = append(allErrs, field.Forbidden(field.NewPath("egressSelection", "name"), "both egressSelection names 'master' and 'controlplane' are specified, only one is allowed")) } return allErrs diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index 7e0185001..9da0e2a09 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -91,10 +91,6 @@ func (s EgressType) AsNetworkContext() NetworkContext { func lookupServiceName(name string) (EgressType, error) { switch strings.ToLower(name) { - // 'master' is deprecated, interpret "master" as controlplane internally until removed in v1.22. - case "master": - klog.Warning("EgressSelection name 'master' is deprecated, use 'controlplane' instead") - return ControlPlane, nil case "controlplane": return ControlPlane, nil case "etcd": @@ -362,6 +358,16 @@ func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSe return cs, nil } +// NewEgressSelectorWithMap returns a EgressSelector with the supplied EgressType to DialFunc map. +func NewEgressSelectorWithMap(m map[EgressType]utilnet.DialFunc) *EgressSelector { + if m == nil { + m = make(map[EgressType]utilnet.DialFunc) + } + return &EgressSelector{ + egressToDialer: m, + } +} + // Lookup gets the dialer function for the network context. // This is configured for the Kubernetes API Server at startup. 
func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) { diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go index 04ad61c4f..af3846181 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go @@ -19,9 +19,9 @@ package metrics import ( "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/utils/clock" ) const ( diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 70c8d8b85..71d8a534b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -61,13 +61,13 @@ func handleError(w http.ResponseWriter, r *http.Request, err error) { // requestWatermark is used to track maximal numbers of requests in a particular phase of handling type requestWatermark struct { phase string - readOnlyObserver, mutatingObserver fcmetrics.TimedObserver + readOnlyObserver, mutatingObserver fcmetrics.RatioedChangeObserver lock sync.Mutex readOnlyWatermark, mutatingWatermark int } func (w *requestWatermark) recordMutating(mutatingVal int) { - w.mutatingObserver.Set(float64(mutatingVal)) + w.mutatingObserver.Observe(float64(mutatingVal)) w.lock.Lock() defer w.lock.Unlock() @@ -78,7 +78,7 @@ func (w *requestWatermark) recordMutating(mutatingVal int) { } func (w *requestWatermark) recordReadOnly(readOnlyVal int) { - w.readOnlyObserver.Set(float64(readOnlyVal)) + w.readOnlyObserver.Observe(float64(readOnlyVal)) w.lock.Lock() defer w.lock.Unlock() @@ -132,11 +132,11 @@ func WithMaxInFlightLimit( var mutatingChan chan bool if nonMutatingLimit != 0 { nonMutatingChan = make(chan bool, nonMutatingLimit) - watermark.readOnlyObserver.SetX1(float64(nonMutatingLimit)) + watermark.readOnlyObserver.SetDenominator(float64(nonMutatingLimit)) } if mutatingLimit != 0 { mutatingChan = make(chan bool, mutatingLimit) - watermark.mutatingObserver.SetX1(float64(mutatingLimit)) + watermark.mutatingObserver.SetDenominator(float64(mutatingLimit)) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index c89cbbd86..8b95021c7 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -22,11 +22,13 @@ import ( "net/http" "runtime" "sync/atomic" + "time" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" apitypes "k8s.io/apimachinery/pkg/types" epmetrics "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" @@ -55,13 +57,22 @@ var atomicMutatingWaiting, atomicReadOnlyWaiting int32 // newInitializationSignal is defined for testing purposes. 
var newInitializationSignal = utilflowcontrol.NewInitializationSignal +func truncateLogField(s string) string { + const maxFieldLogLength = 64 + + if len(s) > maxFieldLogLength { + s = s[0:maxFieldLogLength] + } + return s +} + // WithPriorityAndFairness limits the number of in-flight // requests in a fine-grained way. func WithPriorityAndFairness( handler http.Handler, longRunningRequestCheck apirequest.LongRunningRequestCheck, fcIfc utilflowcontrol.Interface, - widthEstimator flowcontrolrequest.WidthEstimatorFunc, + workEstimator flowcontrolrequest.WorkEstimatorFunc, ) http.Handler { if fcIfc == nil { klog.Warningf("priority and fairness support not found, skipping") @@ -90,12 +101,17 @@ func WithPriorityAndFairness( } var classification *PriorityAndFairnessClassification - note := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration) { + estimateWork := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) flowcontrolrequest.WorkEstimate { classification = &PriorityAndFairnessClassification{ FlowSchemaName: fs.Name, FlowSchemaUID: fs.UID, PriorityLevelName: pl.Name, PriorityLevelUID: pl.UID} + + httplog.AddKeyValue(ctx, "apf_pl", truncateLogField(pl.Name)) + httplog.AddKeyValue(ctx, "apf_fs", truncateLogField(fs.Name)) + httplog.AddKeyValue(ctx, "apf_fd", truncateLogField(flowDistinguisher)) + return workEstimator(r, fs.Name, pl.Name) } var served bool @@ -122,11 +138,10 @@ func WithPriorityAndFairness( } } - // find the estimated "width" of the request - // TODO: Maybe just make it costEstimator and let it return additionalLatency too for the watch? - // TODO: Estimate cost should also take fcIfc.GetWatchCount(requestInfo) as a parameter. - width := widthEstimator.EstimateWidth(r) - digest := utilflowcontrol.RequestDigest{RequestInfo: requestInfo, User: user, Width: width} + digest := utilflowcontrol.RequestDigest{ + RequestInfo: requestInfo, + User: user, + } if isWatchRequest { // This channel blocks calling handler.ServeHTTP() until closed, and is closed inside execute(). @@ -159,12 +174,16 @@ func WithPriorityAndFairness( }() execute := func() { + startedAt := time.Now() + defer func() { + httplog.AddKeyValue(ctx, "apf_init_latency", time.Since(startedAt)) + }() noteExecutingDelta(1) defer noteExecutingDelta(-1) served = true setResponseHeaders(classification, w) - forgetWatch = fcIfc.RegisterWatch(requestInfo) + forgetWatch = fcIfc.RegisterWatch(r) // Notify the main thread that we're ready to start the watch. close(shouldStartWatchCh) @@ -216,7 +235,7 @@ func WithPriorityAndFairness( // Note that Handle will return irrespective of whether the request // executes or is rejected. In the latter case, the function will return // without calling the passed `execute` function. 
- fcIfc.Handle(handleCtx, digest, note, queueNote, execute) + fcIfc.Handle(handleCtx, digest, estimateWork, queueNote, execute) }() select { @@ -247,7 +266,7 @@ func WithPriorityAndFairness( handler.ServeHTTP(w, r) } - fcIfc.Handle(ctx, digest, note, queueNote, execute) + fcIfc.Handle(ctx, digest, estimateWork, queueNote, execute) } if !served { diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index 69e4fd4f2..7487fbf1e 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -30,6 +30,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/endpoints/responsewriter" ) // WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by timeout. @@ -90,7 +91,8 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // resultCh is used as both errCh and stopCh resultCh := make(chan interface{}) - tw := newTimeoutWriter(w) + var tw timeoutWriter + tw, w = newTimeoutWriter(w) go func() { defer func() { err := recover() @@ -105,7 +107,7 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } resultCh <- err }() - t.handler.ServeHTTP(tw, r) + t.handler.ServeHTTP(w, r) }() select { case err := <-resultCh: @@ -146,24 +148,16 @@ type timeoutWriter interface { timeout(*apierrors.StatusError) } -func newTimeoutWriter(w http.ResponseWriter) timeoutWriter { +func newTimeoutWriter(w http.ResponseWriter) (timeoutWriter, http.ResponseWriter) { base := &baseTimeoutWriter{w: w} + wrapped := responsewriter.WrapForHTTP1Or2(base) - _, notifiable := w.(http.CloseNotifier) - _, hijackable := w.(http.Hijacker) - - switch { - case notifiable && hijackable: - return &closeHijackTimeoutWriter{base} - case notifiable: - return &closeTimeoutWriter{base} - case hijackable: - return &hijackTimeoutWriter{base} - default: - return base - } + return base, wrapped } +var _ http.ResponseWriter = &baseTimeoutWriter{} +var _ responsewriter.UserProvidedDecorator = &baseTimeoutWriter{} + type baseTimeoutWriter struct { w http.ResponseWriter @@ -176,6 +170,10 @@ type baseTimeoutWriter struct { hijacked bool } +func (tw *baseTimeoutWriter) Unwrap() http.ResponseWriter { + return tw.w +} + func (tw *baseTimeoutWriter) Header() http.Header { tw.mu.Lock() defer tw.mu.Unlock() @@ -210,9 +208,9 @@ func (tw *baseTimeoutWriter) Flush() { return } - if flusher, ok := tw.w.(http.Flusher); ok { - flusher.Flush() - } + // the outer ResponseWriter object returned by WrapForHTTP1Or2 implements + // http.Flusher if the inner object (tw.w) implements http.Flusher. + tw.w.(http.Flusher).Flush() } func (tw *baseTimeoutWriter) WriteHeader(code int) { @@ -259,7 +257,7 @@ func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) { } } -func (tw *baseTimeoutWriter) closeNotify() <-chan bool { +func (tw *baseTimeoutWriter) CloseNotify() <-chan bool { tw.mu.Lock() defer tw.mu.Unlock() @@ -269,47 +267,24 @@ func (tw *baseTimeoutWriter) closeNotify() <-chan bool { return done } + // the outer ResponseWriter object returned by WrapForHTTP1Or2 implements + // http.CloseNotifier if the inner object (tw.w) implements http.CloseNotifier. 
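The timeout filter now hands a plain decorator to responsewriter.WrapForHTTP1Or2 instead of maintaining its own closeHijackTimeoutWriter variants. A sketch of that pattern with a hypothetical byte-counting decorator; only WrapForHTTP1Or2 and the Unwrap contract are taken from the diff, everything else is illustrative.

package example

import (
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/responsewriter"
)

// countingWriter implements only http.ResponseWriter plus Unwrap; it never has
// to re-implement Flusher, CloseNotifier or Hijacker itself.
type countingWriter struct {
	w       http.ResponseWriter
	written int64
}

func (c *countingWriter) Unwrap() http.ResponseWriter { return c.w }
func (c *countingWriter) Header() http.Header         { return c.w.Header() }
func (c *countingWriter) WriteHeader(code int)        { c.w.WriteHeader(code) }
func (c *countingWriter) Write(b []byte) (int, error) {
	n, err := c.w.Write(b)
	c.written += int64(n)
	return n, err
}

func withByteCount(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cw := &countingWriter{w: w}
		// The outer writer returned by WrapForHTTP1Or2 re-exposes the optional
		// interfaces whenever the original writer (reachable via Unwrap)
		// supports them.
		handler.ServeHTTP(responsewriter.WrapForHTTP1Or2(cw), r)
	})
}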
return tw.w.(http.CloseNotifier).CloseNotify() } -func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) { +func (tw *baseTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { tw.mu.Lock() defer tw.mu.Unlock() if tw.timedOut { return nil, nil, http.ErrHandlerTimeout } + + // the outer ResponseWriter object returned by WrapForHTTP1Or2 implements + // http.Hijacker if the inner object (tw.w) implements http.Hijacker. conn, rw, err := tw.w.(http.Hijacker).Hijack() if err == nil { tw.hijacked = true } return conn, rw, err } - -type closeTimeoutWriter struct { - *baseTimeoutWriter -} - -func (tw *closeTimeoutWriter) CloseNotify() <-chan bool { - return tw.closeNotify() -} - -type hijackTimeoutWriter struct { - *baseTimeoutWriter -} - -func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return tw.hijack() -} - -type closeHijackTimeoutWriter struct { - *baseTimeoutWriter -} - -func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool { - return tw.closeNotify() -} - -func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return tw.hijack() -} diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go index 857ce1883..70b32c766 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go @@ -32,6 +32,12 @@ import ( // WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown. func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler { + // NOTE: both WithWaitGroup and WithRetryAfter must use the same exact isRequestExemptFunc 'isRequestExemptFromRetryAfter, + // otherwise SafeWaitGroup might wait indefinitely and will prevent the server from shutting down gracefully. + return withWaitGroup(handler, longRunning, wg, isRequestExemptFromRetryAfter) +} + +func withWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup, isRequestExemptFn isRequestExemptFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() requestInfo, ok := apirequest.RequestInfoFrom(ctx) @@ -41,21 +47,33 @@ func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningReque return } - if !longRunning(req, requestInfo) { - if err := wg.Add(1); err != nil { - // When apiserver is shutting down, signal clients to retry - // There is a good chance the client hit a different server, so a tight retry is good for client responsiveness. - w.Header().Add("Retry-After", "1") - w.Header().Set("Content-Type", runtime.ContentTypeJSON) - w.Header().Set("X-Content-Type-Options", "nosniff") - statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() - w.WriteHeader(int(statusErr.Code)) - fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) - return - } - defer wg.Done() + if longRunning(req, requestInfo) { + handler.ServeHTTP(w, req) + return } + if err := wg.Add(1); err != nil { + // shutdown delay duration has elapsed and SafeWaitGroup.Wait has been invoked, + // this means 'WithRetryAfter' has started sending Retry-After response. + // we are going to exempt the same set of requests that WithRetryAfter are + // exempting from being rejected with a Retry-After response. 
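The rewritten WithWaitGroup relies on SafeWaitGroup.Add returning an error once Wait has been invoked. A small sketch of that behaviour, assuming only the Add/Done/Wait methods referenced in the hunk above; the demo function itself is illustrative.

package example

import (
	"fmt"

	utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
)

func demo() {
	var wg utilwaitgroup.SafeWaitGroup

	_ = wg.Add(1) // an in-flight request is tracked
	go wg.Done()  // ...and eventually finishes
	wg.Wait()     // graceful shutdown: block until tracked requests drain

	// After Wait has been called, Add fails; this is how the filter above
	// detects shutdown and either exempts the request or replies with 429.
	if err := wg.Add(1); err != nil {
		fmt.Println("rejecting new request:", err)
	}
}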
+ if isRequestExemptFn(req) { + handler.ServeHTTP(w, req) + return + } + + // When apiserver is shutting down, signal clients to retry + // There is a good chance the client hit a different server, so a tight retry is good for client responsiveness. + w.Header().Add("Retry-After", "1") + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Header().Set("X-Content-Type-Options", "nosniff") + statusErr := apierrors.NewServiceUnavailable("apiserver is shutting down").Status() + w.WriteHeader(int(statusErr.Code)) + fmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr)) + return + } + + defer wg.Done() handler.ServeHTTP(w, req) }) } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/with_retry_after.go b/vendor/k8s.io/apiserver/pkg/server/filters/with_retry_after.go new file mode 100644 index 000000000..c5e2daa8e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/with_retry_after.go @@ -0,0 +1,130 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + "strings" +) + +var ( + // health probes and metrics scraping are never rejected, we will continue + // serving these requests after shutdown delay duration elapses. + pathPrefixesExemptFromRetryAfter = []string{ + "/readyz", + "/livez", + "/healthz", + "/metrics", + } +) + +// isRequestExemptFunc returns true if the request should not be rejected, +// with a Retry-After response, otherwise it returns false. +type isRequestExemptFunc func(*http.Request) bool + +// retryAfterParams dictates how the Retry-After response is constructed +type retryAfterParams struct { + // TearDownConnection is true when we should send a 'Connection: close' + // header in the response so net/http can tear down the TCP connection. + TearDownConnection bool + + // Message describes why Retry-After response has been sent by the server + Message string +} + +// shouldRespondWithRetryAfterFunc returns true if the requests should +// be rejected with a Retry-After response once certain conditions are met. +// The retryAfterParams returned contains instructions on how to +// construct the Retry-After response. +type shouldRespondWithRetryAfterFunc func() (*retryAfterParams, bool) + +// WithRetryAfter rejects any incoming new request(s) with a 429 +// if the specified shutdownDelayDurationElapsedFn channel is closed +// +// It includes new request(s) on a new or an existing TCP connection +// Any new request(s) arriving after shutdownDelayDurationElapsedFn is closed +// are replied with a 429 and the following response headers: +// - 'Retry-After: N` (so client can retry after N seconds, hopefully on a new apiserver instance) +// - 'Connection: close': tear down the TCP connection +// +// TODO: is there a way to merge WithWaitGroup and this filter? 
+func WithRetryAfter(handler http.Handler, shutdownDelayDurationElapsedCh <-chan struct{}) http.Handler { + shutdownRetryAfterParams := &retryAfterParams{ + TearDownConnection: true, + Message: "The apiserver is shutting down, please try again later.", + } + + // NOTE: both WithRetryAfter and WithWaitGroup must use the same exact isRequestExemptFunc 'isRequestExemptFromRetryAfter, + // otherwise SafeWaitGroup might wait indefinitely and will prevent the server from shutting down gracefully. + return withRetryAfter(handler, isRequestExemptFromRetryAfter, func() (*retryAfterParams, bool) { + select { + case <-shutdownDelayDurationElapsedCh: + return shutdownRetryAfterParams, true + default: + return nil, false + } + }) +} + +func withRetryAfter(handler http.Handler, isRequestExemptFn isRequestExemptFunc, shouldRespondWithRetryAfterFn shouldRespondWithRetryAfterFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + params, send := shouldRespondWithRetryAfterFn() + if !send || isRequestExemptFn(req) { + handler.ServeHTTP(w, req) + return + } + + // If we are here this means it's time to send Retry-After response + // + // Copied from net/http2 library + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + if params.TearDownConnection { + w.Header().Set("Connection", "close") + } + + // Return a 429 status asking the client to try again after 5 seconds + w.Header().Set("Retry-After", "5") + http.Error(w, params.Message, http.StatusTooManyRequests) + }) +} + +// isRequestExemptFromRetryAfter returns true if the given request should be exempt +// from being rejected with a 'Retry-After' response. +// NOTE: both 'WithRetryAfter' and 'WithWaitGroup' filters should use this function +// to exempt the set of requests from being rejected or tracked. +func isRequestExemptFromRetryAfter(r *http.Request) bool { + return isKubeApiserverUserAgent(r) || hasExemptPathPrefix(r) +} + +// isKubeApiserverUserAgent returns true if the user-agent matches +// the one set by the local loopback. +// NOTE: we can't look up the authenticated user informaion from the +// request context since the authentication filter has not executed yet. 
+func isKubeApiserverUserAgent(req *http.Request) bool { + return strings.HasPrefix(req.UserAgent(), "kube-apiserver/") +} + +func hasExemptPathPrefix(r *http.Request) bool { + for _, whiteListedPrefix := range pathPrefixesExemptFromRetryAfter { + if strings.HasPrefix(r.URL.Path, whiteListedPrefix) { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 5f4e8e348..954c61756 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apimachinery/pkg/version" @@ -50,12 +49,14 @@ import ( utilopenapi "k8s.io/apiserver/pkg/util/openapi" restclient "k8s.io/client-go/rest" "k8s.io/klog/v2" - openapibuilder "k8s.io/kube-openapi/pkg/builder" + openapibuilder2 "k8s.io/kube-openapi/pkg/builder" openapicommon "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/handler" + "k8s.io/kube-openapi/pkg/handler3" openapiutil "k8s.io/kube-openapi/pkg/util" openapiproto "k8s.io/kube-openapi/pkg/util/proto" "k8s.io/kube-openapi/pkg/validation/spec" + "k8s.io/utils/clock" ) // Info about an API group. @@ -144,6 +145,10 @@ type GenericAPIServer struct { // It is set during PrepareRun if `openAPIConfig` is non-nil unless `skipOpenAPIInstallation` is true. OpenAPIVersionedService *handler.OpenAPIService + // OpenAPIV3VersionedService controls the /openapi/v3 endpoint and can be used to update the served spec. + // It is set during PrepareRun if `openAPIConfig` is non-nil unless `skipOpenAPIInstallation` is true. + OpenAPIV3VersionedService *handler3.OpenAPIService + // StaticOpenAPISpec is the spec derived from the restful container endpoints. // It is set during PrepareRun. StaticOpenAPISpec *spec.Swagger @@ -213,6 +218,21 @@ type GenericAPIServer struct { // lifecycleSignals provides access to the various signals that happen during the life cycle of the apiserver. lifecycleSignals lifecycleSignals + + // muxAndDiscoveryCompleteSignals holds signals that indicate all known HTTP paths have been registered. + // it exists primarily to avoid returning a 404 response when a resource actually exists but we haven't installed the path to a handler. + // it is exposed for easier composition of the individual servers. + // the primary users of this field are the WithMuxCompleteProtection filter and the NotFoundHandler + muxAndDiscoveryCompleteSignals map[string]<-chan struct{} + + // ShutdownSendRetryAfter dictates when to initiate shutdown of the HTTP + // Server during the graceful termination of the apiserver. If true, we wait + // for non longrunning requests in flight to be drained and then initiate a + // shutdown of the HTTP Server. If false, we initiate a shutdown of the HTTP + // Server as soon as ShutdownDelayDuration has elapsed. + // If enabled, after ShutdownDelayDuration elapses, any incoming request is + // rejected with a 429 status code and a 'Retry-After' response. + ShutdownSendRetryAfter bool } // DelegationTarget is an interface which allows for composition of API servers with top level handling that works @@ -238,6 +258,9 @@ type DelegationTarget interface { // PrepareRun does post API installation setup steps. 
It calls recursively the same function of the delegates. PrepareRun() preparedGenericAPIServer + + // MuxAndDiscoveryCompleteSignals exposes registered signals that indicate if all known HTTP paths have been installed. + MuxAndDiscoveryCompleteSignals() map[string]<-chan struct{} } func (s *GenericAPIServer) UnprotectedHandler() http.Handler { @@ -261,15 +284,37 @@ func (s *GenericAPIServer) NextDelegate() DelegationTarget { return s.delegationTarget } +// RegisterMuxAndDiscoveryCompleteSignal registers the given signal that will be used to determine if all known +// HTTP paths have been registered. It is okay to call this method after instantiating the generic server but before running. +func (s *GenericAPIServer) RegisterMuxAndDiscoveryCompleteSignal(signalName string, signal <-chan struct{}) error { + if _, exists := s.muxAndDiscoveryCompleteSignals[signalName]; exists { + return fmt.Errorf("%s already registered", signalName) + } + s.muxAndDiscoveryCompleteSignals[signalName] = signal + return nil +} + +func (s *GenericAPIServer) MuxAndDiscoveryCompleteSignals() map[string]<-chan struct{} { + return s.muxAndDiscoveryCompleteSignals +} + type emptyDelegate struct { + // handler is called at the end of the delegation chain + // when a request has been made against an unregistered HTTP path the individual servers will simply pass it through until it reaches the handler. + handler http.Handler } func NewEmptyDelegate() DelegationTarget { return emptyDelegate{} } +// NewEmptyDelegateWithCustomHandler allows for registering a custom handler usually for special handling of 404 requests +func NewEmptyDelegateWithCustomHandler(handler http.Handler) DelegationTarget { + return emptyDelegate{handler} +} + func (s emptyDelegate) UnprotectedHandler() http.Handler { - return nil + return s.handler } func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry { return map[string]postStartHookEntry{} @@ -289,6 +334,9 @@ func (s emptyDelegate) NextDelegate() DelegationTarget { func (s emptyDelegate) PrepareRun() preparedGenericAPIServer { return preparedGenericAPIServer{nil} } +func (s emptyDelegate) MuxAndDiscoveryCompleteSignals() map[string]<-chan struct{} { + return map[string]<-chan struct{}{} +} // preparedGenericAPIServer is a private wrapper that enforces a call of PrepareRun() before Run can be invoked. 
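The new RegisterMuxAndDiscoveryCompleteSignal method lets a composed server delay its not-found handling until all routes are installed. A sketch of how a caller might wire such a signal; wireMuxCompleteSignal, installRoutes and the signal name are hypothetical, only the method and its signature come from the diff above.

package example

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
)

// wireMuxCompleteSignal registers a channel that Run aggregates into the
// MuxAndDiscoveryComplete lifecycle signal once every registered channel closes.
func wireMuxCompleteSignal(s *genericapiserver.GenericAPIServer, installRoutes func()) error {
	muxComplete := make(chan struct{})
	if err := s.RegisterMuxAndDiscoveryCompleteSignal("example-routes", muxComplete); err != nil {
		return err
	}
	go func() {
		installRoutes()    // finish installing handlers and discovery paths
		close(muxComplete) // consumed, via Run, by the WithMuxCompleteProtection filter and the NotFoundHandler
	}()
	return nil
}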
type preparedGenericAPIServer struct { @@ -302,7 +350,12 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { if s.openAPIConfig != nil && !s.skipOpenAPIInstallation { s.OpenAPIVersionedService, s.StaticOpenAPISpec = routes.OpenAPI{ Config: s.openAPIConfig, - }.Install(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + }.InstallV2(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + if utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { + s.OpenAPIV3VersionedService = routes.OpenAPI{ + Config: s.openAPIConfig, + }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + } } s.installHealthz() @@ -336,6 +389,23 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { delayedStopCh := s.lifecycleSignals.AfterShutdownDelayDuration shutdownInitiatedCh := s.lifecycleSignals.ShutdownInitiated + // spawn a new goroutine for closing the MuxAndDiscoveryComplete signal + // registration happens during construction of the generic api server + // the last server in the chain aggregates signals from the previous instances + go func() { + for _, muxAndDiscoveryCompletedSignal := range s.GenericAPIServer.MuxAndDiscoveryCompleteSignals() { + select { + case <-muxAndDiscoveryCompletedSignal: + continue + case <-stopCh: + klog.V(1).Infof("haven't completed %s, stop requested", s.lifecycleSignals.MuxAndDiscoveryComplete.Name()) + return + } + } + s.lifecycleSignals.MuxAndDiscoveryComplete.Signal() + klog.V(1).Infof("%s has all endpoints registered and discovery information is complete", s.lifecycleSignals.MuxAndDiscoveryComplete.Name()) + }() + go func() { defer delayedStopCh.Signal() defer klog.V(1).InfoS("[graceful-termination] shutdown event", "name", delayedStopCh.Name()) @@ -352,7 +422,22 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { }() // close socket after delayed stopCh - stoppedCh, listenerStoppedCh, err := s.NonBlockingRun(delayedStopCh.Signaled()) + drainedCh := s.lifecycleSignals.InFlightRequestsDrained + stopHttpServerCh := delayedStopCh.Signaled() + shutdownTimeout := s.ShutdownTimeout + if s.ShutdownSendRetryAfter { + // when this mode is enabled, we do the following: + // - the server will continue to listen until all existing requests in flight + // (not including active long runnning requests) have been drained. + // - once drained, http Server Shutdown is invoked with a timeout of 2s, + // net/http waits for 1s for the peer to respond to a GO_AWAY frame, so + // we should wait for a minimum of 2s + stopHttpServerCh = drainedCh.Signaled() + shutdownTimeout = 2 * time.Second + klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "ShutdownTimeout", shutdownTimeout) + } + + stoppedCh, listenerStoppedCh, err := s.NonBlockingRun(stopHttpServerCh, shutdownTimeout) if err != nil { return err } @@ -363,7 +448,6 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { klog.V(1).InfoS("[graceful-termination] shutdown event", "name", httpServerStoppedListeningCh.Name()) }() - drainedCh := s.lifecycleSignals.InFlightRequestsDrained go func() { defer drainedCh.Signal() defer klog.V(1).InfoS("[graceful-termination] shutdown event", "name", drainedCh.Name()) @@ -397,7 +481,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { // NonBlockingRun spawns the secure http server. An error is // returned if the secure port cannot be listened on. // The returned channel is closed when the (asynchronous) termination is finished. 
-func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) (<-chan struct{}, <-chan struct{}, error) { +func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdownTimeout time.Duration) (<-chan struct{}, <-chan struct{}, error) { // Use an stop channel to allow graceful shutdown without dropping audit events // after http server shutdown. auditStopCh := make(chan struct{}) @@ -416,8 +500,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) (<-chan var listenerStoppedCh <-chan struct{} if s.SecureServingInfo != nil && s.Handler != nil { var err error - klog.V(1).Infof("[graceful-termination] ShutdownTimeout=%s", s.ShutdownTimeout) - stoppedCh, listenerStoppedCh, err = s.SecureServingInfo.ServeWithListenerStopped(s.Handler, s.ShutdownTimeout, internalStopCh) + stoppedCh, listenerStoppedCh, err = s.SecureServingInfo.ServeWithListenerStopped(s.Handler, shutdownTimeout, internalStopCh) if err != nil { close(internalStopCh) close(auditStopCh) @@ -633,7 +716,7 @@ func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...* } // Build the openapi definitions for those resources and convert it to proto models - openAPISpec, err := openapibuilder.BuildOpenAPIDefinitionsForResources(s.openAPIConfig, resourceNames...) + openAPISpec, err := openapibuilder2.BuildOpenAPIDefinitionsForResources(s.openAPIConfig, resourceNames...) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz.go index 61da5873c..d6d13444d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz.go @@ -21,8 +21,8 @@ import ( "net/http" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/utils/clock" ) // AddHealthChecks adds HealthCheck(s) to health endpoints (healthz, livez, readyz) but diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index eb288239c..e19505e9c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/endpoints/metrics" - "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/apiserver/pkg/endpoints/responsewriter" "k8s.io/klog/v2" ) @@ -255,7 +255,7 @@ func handleRootHealth(name string, firstTimeHealthy func(), checks ...HealthChec // always be verbose on failure if len(failedChecks) > 0 { klog.V(2).Infof("%s check failed: %s\n%v", strings.Join(failedChecks, ","), name, failedVerboseLogOutput.String()) - http.Error(httplog.Unlogged(r, w), fmt.Sprintf("%s%s check failed", individualCheckOutput.String(), name), http.StatusInternalServerError) + http.Error(responsewriter.GetOriginal(w), fmt.Sprintf("%s%s check failed", individualCheckOutput.String(), name), http.StatusInternalServerError) return } diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index 89ba28a9e..8ac036f9b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -24,16 +24,21 @@ import ( "net/http" "runtime" "strings" + "sync" "time" "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/endpoints/responsewriter" "k8s.io/klog/v2" ) // StacktracePred 
returns true if a stacktrace should be logged for this status. type StacktracePred func(httpStatus int) (logStacktrace bool) +// ShouldLogRequestPred returns true if logging should be enabled for this request +type ShouldLogRequestPred func() bool + type logger interface { Addf(format string, data ...interface{}) } @@ -54,23 +59,32 @@ type respLogger struct { statusRecorded bool status int statusStack string - addedInfo strings.Builder - startTime time.Time + // mutex is used when accessing addedInfo and addedKeyValuePairs. + // They can be modified by other goroutine when logging happens (in case of request timeout) + mutex sync.Mutex + addedInfo strings.Builder + addedKeyValuePairs []interface{} + startTime time.Time captureErrorOutput bool - req *http.Request - w http.ResponseWriter + req *http.Request + userAgent string + w http.ResponseWriter logStacktracePred StacktracePred } +var _ http.ResponseWriter = &respLogger{} +var _ responsewriter.UserProvidedDecorator = &respLogger{} + +func (rl *respLogger) Unwrap() http.ResponseWriter { + return rl.w +} + // Simple logger that logs immediately when Addf is called type passthroughLogger struct{} -//lint:ignore SA1019 Interface implementation check to make sure we don't drop CloseNotifier again -var _ http.CloseNotifier = &respLogger{} - // Addf logs info immediately. func (passthroughLogger) Addf(format string, data ...interface{}) { klog.V(2).Info(fmt.Sprintf(format, data...)) @@ -83,7 +97,18 @@ func DefaultStacktracePred(status int) bool { // WithLogging wraps the handler with logging. func WithLogging(handler http.Handler, pred StacktracePred) http.Handler { + return withLogging(handler, pred, func() bool { + return klog.V(3).Enabled() + }) +} + +func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogRequest ShouldLogRequestPred) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if !shouldLogRequest() { + handler.ServeHTTP(w, req) + return + } + ctx := req.Context() if old := respLoggerFromRequest(req); old != nil { panic("multiple WithLogging calls!") @@ -94,13 +119,13 @@ func WithLogging(handler http.Handler, pred StacktracePred) http.Handler { startTime = receivedTimestamp } - rl := newLoggedWithStartTime(req, w, startTime).StacktraceWhen(pred) + rl := newLoggedWithStartTime(req, w, startTime) + rl.StacktraceWhen(stackTracePred) req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl)) + defer rl.Log() - if klog.V(3).Enabled() { - defer rl.Log() - } - handler.ServeHTTP(rl, req) + w = responsewriter.WrapForHTTP1Or2(rl) + handler.ServeHTTP(w, req) }) } @@ -118,12 +143,14 @@ func respLoggerFromRequest(req *http.Request) *respLogger { } func newLoggedWithStartTime(req *http.Request, w http.ResponseWriter, startTime time.Time) *respLogger { - return &respLogger{ + logger := &respLogger{ startTime: startTime, req: req, + userAgent: req.UserAgent(), w: w, logStacktracePred: DefaultStacktracePred, } + return logger } // newLogged turns a normal response writer into a logged response writer. @@ -171,6 +198,8 @@ func StatusIsNot(statuses ...int) StacktracePred { // Addf adds additional data to be logged with this request. 
func (rl *respLogger) Addf(format string, data ...interface{}) { + rl.mutex.Lock() + defer rl.mutex.Unlock() rl.addedInfo.WriteString("\n") rl.addedInfo.WriteString(fmt.Sprintf(format, data...)) } @@ -181,6 +210,22 @@ func AddInfof(ctx context.Context, format string, data ...interface{}) { } } +func (rl *respLogger) AddKeyValue(key string, value interface{}) { + rl.mutex.Lock() + defer rl.mutex.Unlock() + rl.addedKeyValuePairs = append(rl.addedKeyValuePairs, key, value) +} + +// AddKeyValue adds a (key, value) pair to the httplog associated +// with the request. +// Use this function if you want your data to show up in httplog +// in a more structured and readable way. +func AddKeyValue(ctx context.Context, key string, value interface{}) { + if rl := respLoggerFromContext(ctx); rl != nil { + rl.AddKeyValue(key, value) + } +} + // Log is intended to be called once at the end of your request handler, via defer func (rl *respLogger) Log() { latency := time.Since(rl.startTime) @@ -200,10 +245,19 @@ func (rl *respLogger) Log() { "verb", verb, "URI", rl.req.RequestURI, "latency", latency, - "userAgent", rl.req.UserAgent(), + // We can't get UserAgent from rl.req.UserAgent() here as it accesses headers map, + // which can be modified in another goroutine when apiserver request times out. + // For example authentication filter modifies request's headers, + // This can cause apiserver to crash with unrecoverable fatal error. + // More info about concurrent read and write for maps: https://golang.org/doc/go1.6#runtime + "userAgent", rl.userAgent, "audit-ID", auditID, "srcIP", rl.req.RemoteAddr, } + // Lock for accessing addedKeyValuePairs and addedInfo + rl.mutex.Lock() + defer rl.mutex.Unlock() + keysAndValues = append(keysAndValues, rl.addedKeyValuePairs...) if rl.hijacked { keysAndValues = append(keysAndValues, "hijacked", true) @@ -237,32 +291,18 @@ func (rl *respLogger) Write(b []byte) (int, error) { return rl.w.Write(b) } -// Flush implements http.Flusher even if the underlying http.Writer doesn't implement it. -// Flush is used for streaming purposes and allows to flush buffered data to the client. -func (rl *respLogger) Flush() { - if flusher, ok := rl.w.(http.Flusher); ok { - flusher.Flush() - } else if klog.V(2).Enabled() { - klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) - } -} - // WriteHeader implements http.ResponseWriter. func (rl *respLogger) WriteHeader(status int) { rl.recordStatus(status) rl.w.WriteHeader(status) } -// Hijack implements http.Hijacker. func (rl *respLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { rl.hijacked = true - return rl.w.(http.Hijacker).Hijack() -} -// CloseNotify implements http.CloseNotifier -func (rl *respLogger) CloseNotify() <-chan bool { - //lint:ignore SA1019 There are places in the code base requiring the CloseNotifier interface to be implemented. - return rl.w.(http.CloseNotifier).CloseNotify() + // the outer ResponseWriter object returned by WrapForHTTP1Or2 implements + // http.Hijacker if the inner object (rl.w) implements http.Hijacker. + return rl.w.(http.Hijacker).Hijack() } func (rl *respLogger) recordStatus(status int) { diff --git a/vendor/k8s.io/apiserver/pkg/server/lifecycle_signals.go b/vendor/k8s.io/apiserver/pkg/server/lifecycle_signals.go index fda4f0951..6b406072b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/lifecycle_signals.go +++ b/vendor/k8s.io/apiserver/pkg/server/lifecycle_signals.go @@ -16,6 +16,10 @@ limitations under the License. 
package server +import ( + "sync" +) + /* We make an attempt here to identify the events that take place during lifecycle of the apiserver. @@ -28,7 +32,9 @@ Events: - InFlightRequestsDrained: all in flight request(s) have been drained - HasBeenReady is signaled when the readyz endpoint succeeds for the first time -The following is a sequence of shutdown events that we expect to see during termination: +The following is a sequence of shutdown events that we expect to see with + 'ShutdownSendRetryAfter' = false: + T0: ShutdownInitiated: KILL signal received - /readyz starts returning red - run pre shutdown hooks @@ -54,6 +60,31 @@ T0 + 70s + up-to 60s: InFlightRequestsDrained: existing in flight requests have any request in flight has a hard timeout of 60s. - it's time to call 'Shutdown' on the audit events since all in flight request(s) have drained. + + +The following is a sequence of shutdown events that we expect to see with + 'ShutdownSendRetryAfter' = true: + +T0: ShutdownInitiated: KILL signal received + - /readyz starts returning red + - run pre shutdown hooks + +T0+70s: AfterShutdownDelayDuration: shutdown delay duration has passed + - the default value of 'ShutdownDelayDuration' is '70s' + - the HTTP Server will continue to listen + - the apiserver is not accepting new request(s) + - it includes new request(s) on a new or an existing TCP connection + - new request(s) arriving after this point are replied with a 429 + and the response headers: 'Retry-After: 1` and 'Connection: close' + - note: these new request(s) will not show up in audit logs + +T0 + 70s + up to 60s: InFlightRequestsDrained: existing in flight requests have been drained + - long running requests are outside of this scope + - up to 60s: the default value of 'ShutdownTimeout' is 60s, this means that + any request in flight has a hard timeout of 60s. + - server.Shutdown is called, the HTTP Server stops listening immediately + - the HTTP Server waits gracefully for existing requests to complete + up to '2s' (it's hard coded right now) */ // lifecycleSignal encapsulates a named apiserver event @@ -99,6 +130,11 @@ type lifecycleSignals struct { // HasBeenReady is signaled when the readyz endpoint succeeds for the first time. HasBeenReady lifecycleSignal + + // MuxAndDiscoveryComplete is signaled when all known HTTP paths have been installed. + // It exists primarily to avoid returning a 404 response when a resource actually exists but we haven't installed the path to a handler. + // The actual logic is implemented by an APIServer using the generic server library. + MuxAndDiscoveryComplete lifecycleSignal } // newLifecycleSignals returns an instance of lifecycleSignals interface to be used @@ -110,28 +146,28 @@ func newLifecycleSignals() lifecycleSignals { InFlightRequestsDrained: newNamedChannelWrapper("InFlightRequestsDrained"), HTTPServerStoppedListening: newNamedChannelWrapper("HTTPServerStoppedListening"), HasBeenReady: newNamedChannelWrapper("HasBeenReady"), + MuxAndDiscoveryComplete: newNamedChannelWrapper("MuxAndDiscoveryComplete"), } } func newNamedChannelWrapper(name string) lifecycleSignal { return &namedChannelWrapper{ name: name, + once: sync.Once{}, ch: make(chan struct{}), } } type namedChannelWrapper struct { name string + once sync.Once ch chan struct{} } func (e *namedChannelWrapper) Signal() { - select { - case <-e.ch: - // already closed, don't close again. 
- default: + e.once.Do(func() { close(e.ch) - } + }) } func (e *namedChannelWrapper) Signaled() <-chan struct{} { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go index 794e89ded..13968b4e7 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -58,7 +58,7 @@ func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { // But in the advanced (and usually not recommended) case of delegated apiservers there can be more. // Validate will filter out the known groups of each registry. // If anything is left over after that, an error is returned. -func (s *APIEnablementOptions) Validate(registries ...GroupRegisty) []error { +func (s *APIEnablementOptions) Validate(registries ...GroupRegistry) []error { if s == nil { return nil } @@ -98,7 +98,7 @@ func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig * return err } -func unknownGroups(groups []string, registry GroupRegisty) []string { +func unknownGroups(groups []string, registry GroupRegistry) []string { unknownGroups := []string{} for _, group := range groups { if !registry.IsGroupRegistered(group) { @@ -108,8 +108,8 @@ func unknownGroups(groups []string, registry GroupRegisty) []string { return unknownGroups } -// GroupRegisty provides a method to check whether given group is registered. -type GroupRegisty interface { +// GroupRegistry provides a method to check whether given group is registered. +type GroupRegistry interface { // IsRegistered returns true if given group is registered. IsGroupRegistered(group string) bool } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go index 6e062c987..fde8e8eb0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/audit.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go @@ -289,8 +289,8 @@ func (o *AuditOptions) ApplyTo( return fmt.Errorf("server config must be non-nil") } - // 1. Build policy checker - checker, err := o.newPolicyChecker() + // 1. Build policy evaluator + evaluator, err := o.newPolicyRuleEvaluator() if err != nil { return err } @@ -302,7 +302,7 @@ func (o *AuditOptions) ApplyTo( return err } if w != nil { - if checker == nil { + if evaluator == nil { klog.V(2).Info("No audit policy file provided, no events will be recorded for log backend") } else { logBackend = o.LogOptions.newBackend(w) @@ -312,7 +312,7 @@ func (o *AuditOptions) ApplyTo( // 3. Build webhook backend var webhookBackend audit.Backend if o.WebhookOptions.enabled() { - if checker == nil { + if evaluator == nil { klog.V(2).Info("No audit policy file provided, no events will be recorded for webhook backend") } else { if c.EgressSelector != nil { @@ -343,8 +343,8 @@ func (o *AuditOptions) ApplyTo( dynamicBackend = o.WebhookOptions.TruncateOptions.wrapBackend(webhookBackend, groupVersion) } - // 5. Set the policy checker - c.AuditPolicyChecker = checker + // 5. Set the policy rule evaluator + c.AuditPolicyRuleEvaluator = evaluator // 6. 
Join the log backend with the webhooks c.AuditBackend = appendBackend(logBackend, dynamicBackend) @@ -355,7 +355,7 @@ func (o *AuditOptions) ApplyTo( return nil } -func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { +func (o *AuditOptions) newPolicyRuleEvaluator() (audit.PolicyRuleEvaluator, error) { if o.PolicyFile == "" { return nil, nil } @@ -364,7 +364,7 @@ func (o *AuditOptions) newPolicyChecker() (policy.Checker, error) { if err != nil { return nil, fmt.Errorf("loading audit policy file: %v", err) } - return policy.NewChecker(p), nil + return policy.NewPolicyRuleEvaluator(p), nil } func (o *AuditBatchOptions) AddFlags(pluginName string, fs *pflag.FlagSet) { @@ -451,7 +451,7 @@ func (o *AuditLogOptions) AddFlags(fs *pflag.FlagSet) { fs.IntVar(&o.MaxAge, "audit-log-maxage", o.MaxAge, "The maximum number of days to retain old audit log files based on the timestamp encoded in their filename.") fs.IntVar(&o.MaxBackups, "audit-log-maxbackup", o.MaxBackups, - "The maximum number of old audit log files to retain.") + "The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files.") fs.IntVar(&o.MaxSize, "audit-log-maxsize", o.MaxSize, "The maximum size in megabytes of the audit log file before it gets rotated.") fs.StringVar(&o.Format, "audit-log-format", o.Format, diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 09659676a..fd0c95c22 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -101,7 +101,7 @@ func getKMSPluginProbes(reader io.Reader) ([]*kmsPluginProbe, error) { config, err := loadConfig(configFileContents) if err != nil { - return result, fmt.Errorf("error while parsing encrypiton provider configuration: %v", err) + return result, fmt.Errorf("error while parsing encryption provider configuration: %v", err) } for _, r := range config.Resources { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index d8b45b819..dfadadbca 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -206,6 +206,9 @@ func (s *EtcdOptions) ApplyTo(c *server.Config) error { } } + // use the StorageObjectCountTracker interface instance from server.Config + s.StorageConfig.StorageObjectCountTracker = c.StorageObjectCountTracker + c.RESTOptionsGetter = &SimpleRestOptionsFactory{ Options: *s, TransformerOverrides: transformerOverrides, @@ -217,6 +220,10 @@ func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFac if err := s.addEtcdHealthEndpoint(c); err != nil { return err } + + // use the StorageObjectCountTracker interface instance from server.Config + s.StorageConfig.StorageObjectCountTracker = c.StorageObjectCountTracker + c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} return nil } @@ -248,12 +255,13 @@ type SimpleRestOptionsFactory struct { func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { ret := generic.RESTOptions{ - StorageConfig: &f.Options.StorageConfig, - Decorator: generic.UndecoratedStorage, - EnableGarbageCollection: f.Options.EnableGarbageCollection, - DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, - ResourcePrefix: 
resource.Group + "/" + resource.Resource, - CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + StorageConfig: f.Options.StorageConfig.ForResource(resource), + Decorator: generic.UndecoratedStorage, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + ResourcePrefix: resource.Group + "/" + resource.Resource, + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker, } if f.TransformerOverrides != nil { if transformer, ok := f.TransformerOverrides[resource]; ok { @@ -290,12 +298,13 @@ func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR } ret := generic.RESTOptions{ - StorageConfig: storageConfig, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, - EnableGarbageCollection: f.Options.EnableGarbageCollection, - ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), - CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + StorageConfig: storageConfig, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, + EnableGarbageCollection: f.Options.EnableGarbageCollection, + ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), + CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, + StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker, } if f.Options.EnableWatchCache { sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index b8d60517a..0d9c6ca9f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -143,7 +143,7 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { } config.FlowControl = utilflowcontrol.New( config.SharedInformerFactory, - kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta1(), + kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta2(), config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, config.RequestTimeout/4, ) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index 9758eec11..07a887a53 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -54,6 +54,15 @@ type ServerRunOptions struct { // apiserver library can wire it to a flag. MaxRequestBodyBytes int64 EnablePriorityAndFairness bool + + // ShutdownSendRetryAfter dictates when to initiate shutdown of the HTTP + // Server during the graceful termination of the apiserver. If true, we wait + // for non longrunning requests in flight to be drained and then initiate a + // shutdown of the HTTP Server. If false, we initiate a shutdown of the HTTP + // Server as soon as ShutdownDelayDuration has elapsed. + // If enabled, after ShutdownDelayDuration elapses, any incoming request is + // rejected with a 429 status code and a 'Retry-After' response. 
+ ShutdownSendRetryAfter bool } func NewServerRunOptions() *ServerRunOptions { @@ -68,6 +77,7 @@ func NewServerRunOptions() *ServerRunOptions { JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, EnablePriorityAndFairness: true, + ShutdownSendRetryAfter: false, } } @@ -86,6 +96,7 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error { c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes c.MaxRequestBodyBytes = s.MaxRequestBodyBytes c.PublicAddress = s.AdvertiseAddress + c.ShutdownSendRetryAfter = s.ShutdownSendRetryAfter return nil } @@ -245,5 +256,10 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ "has elapsed. This can be used to allow load balancer to stop sending traffic to this server.") + fs.BoolVar(&s.ShutdownSendRetryAfter, "shutdown-send-retry-after", s.ShutdownSendRetryAfter, ""+ + "If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, "+ + "during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, "+ + "in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle.") + utilfeature.DefaultMutableFeatureGate.AddFlag(fs) } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index f435ba5b8..c64798b4f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -27,6 +27,7 @@ import ( "github.com/spf13/pflag" "k8s.io/klog/v2" + netutils "k8s.io/utils/net" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/server" @@ -108,7 +109,7 @@ type GeneratableKeyCert struct { func NewSecureServingOptions() *SecureServingOptions { return &SecureServingOptions{ - BindAddress: net.ParseIP("0.0.0.0"), + BindAddress: netutils.ParseIPSloppy("0.0.0.0"), BindPort: 443, ServerCert: GeneratableKeyCert{ PairName: "apiserver", diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go index 5bf87e4b1..483eac79b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go index 1663acee0..844de6188 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go index 1121e95c3..d30f74b9c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go @@ -19,6 +19,7 @@ package routes import ( apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/server/mux" + cachermetrics "k8s.io/apiserver/pkg/storage/cacher/metrics" etcd3metrics "k8s.io/apiserver/pkg/storage/etcd3/metrics" flowcontrolmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" "k8s.io/component-base/metrics/legacyregistry" @@ -46,6 +47,7 @@ func (m MetricsWithReset) 
Install(c *mux.PathRecorderMux) { // register apiserver and etcd metrics func register() { apimetrics.Register() + cachermetrics.Register() etcd3metrics.Register() flowcontrolmetrics.Register() } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 4be4d03fc..44e463532 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -21,9 +21,11 @@ import ( "k8s.io/klog/v2" "k8s.io/apiserver/pkg/server/mux" - "k8s.io/kube-openapi/pkg/builder" + builder2 "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/builder3" "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/handler" + "k8s.io/kube-openapi/pkg/handler3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -33,8 +35,8 @@ type OpenAPI struct { } // Install adds the SwaggerUI webservice to the given mux. -func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) { - spec, err := builder.BuildOpenAPISpec(c.RegisteredWebServices(), oa.Config) +func (oa OpenAPI) InstallV2(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) { + spec, err := builder2.BuildOpenAPISpec(c.RegisteredWebServices(), oa.Config) if err != nil { klog.Fatalf("Failed to build open api spec for root: %v", err) } @@ -51,3 +53,34 @@ func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) (*hand return openAPIVersionedService, spec } + +// InstallV3 adds the static group/versions defined in the RegisteredWebServices to the OpenAPI v3 spec +func (oa OpenAPI) InstallV3(c *restful.Container, mux *mux.PathRecorderMux) *handler3.OpenAPIService { + openAPIVersionedService, err := handler3.NewOpenAPIService(nil) + if err != nil { + klog.Fatalf("Failed to create OpenAPIService: %v", err) + } + + err = openAPIVersionedService.RegisterOpenAPIV3VersionedService("/openapi/v3", mux) + if err != nil { + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) + } + + grouped := make(map[string][]*restful.WebService) + + for _, t := range c.RegisteredWebServices() { + // Strip the "/" prefix from the name + gvName := t.RootPath()[1:] + grouped[gvName] = []*restful.WebService{t} + } + + for gv, ws := range grouped { + spec, err := builder3.BuildOpenAPISpec(ws, oa.Config) + if err != nil { + klog.Errorf("Failed to build OpenAPI v3 for group %s, %q", gv, err) + + } + openAPIVersionedService.UpdateGroupVersion(gv, spec) + } + return openAPIVersionedService +} diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go index c706afb5f..5626cb3a9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -157,6 +157,9 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur Handler: handler, MaxHeaderBytes: 1 << 20, TLSConfig: tlsConfig, + + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, // just shy of requestTimeoutUpperBound } // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. @@ -164,7 +167,9 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur // and small enough to allow a per connection buffer of this size multiplied by `MaxConcurrentStreams`. 
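The secure_serving hunks above set IdleTimeout and ReadHeaderTimeout on the apiserver's http.Server. The same knobs shown on a standalone net/http server as a stdlib-only illustration; the address and handler are placeholders.

package main

import (
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":8443",
		Handler:           http.NewServeMux(),
		MaxHeaderBytes:    1 << 20,
		IdleTimeout:       90 * time.Second, // matches http.DefaultTransport's keep-alive timeout
		ReadHeaderTimeout: 32 * time.Second, // bounds how long a client may take to send its headers
	}
	_ = srv.ListenAndServe()
}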
const resourceBody99Percentile = 256 * 1024 - http2Options := &http2.Server{} + http2Options := &http2.Server{ + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + } // shrink the per-stream buffer and max framesize from the 1MB default while still accommodating most API POST requests in a single frame http2Options.MaxUploadBufferPerStream = resourceBody99Percentile @@ -218,6 +223,9 @@ func (s *SecureServingInfo) ServeWithListenerStopped(handler http.Handler, shutd Handler: handler, MaxHeaderBytes: 1 << 20, TLSConfig: tlsConfig, + + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, // just shy of requestTimeoutUpperBound } // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. @@ -225,7 +233,9 @@ func (s *SecureServingInfo) ServeWithListenerStopped(handler http.Handler, shutd // and small enough to allow a per connection buffer of this size multiplied by `MaxConcurrentStreams`. const resourceBody99Percentile = 256 * 1024 - http2Options := &http2.Server{} + http2Options := &http2.Server{ + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + } // shrink the per-stream buffer and max framesize from the 1MB default while still accommodating most API POST requests in a single frame http2Options.MaxUploadBufferPerStream = resourceBody99Percentile diff --git a/vendor/k8s.io/apiserver/pkg/server/signal_posix.go b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go index 11b3bba65..7acb2038a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/signal_posix.go +++ b/vendor/k8s.io/apiserver/pkg/server/signal_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 689b51322..3b8c71de1 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -46,7 +46,7 @@ type Backend struct { type StorageFactory interface { // New finds the storage destination for the given group and resource. It will // return an error if the group has no storage destination configured. - NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) + NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error) // ResourcePrefix returns the overridden resource prefix for the GroupResource // This allows for cohabitation of resources with different native types and provides @@ -250,7 +250,7 @@ func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.Gro // New finds the storage destination for the given group and resource. It will // return an error if the group has no storage destination configured. 
-func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.Config, error) { +func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error) { chosenStorageResource := s.getStorageGroupResource(groupResource) // operate on copy @@ -284,7 +284,7 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (* } klog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) - return &storageConfig, nil + return storageConfig.ForResource(groupResource), nil } // Backends returns all backends for all registered storage destinations. diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 03efddb5b..217fa7258 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -31,16 +31,17 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/cacher/metrics" utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + "k8s.io/utils/clock" utiltrace "k8s.io/utils/trace" ) @@ -191,7 +192,7 @@ func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { if bucketID < t.startBucketID { bucketID = t.startBucketID } - watchers, _ := t.watchersBuckets[bucketID] + watchers := t.watchersBuckets[bucketID] t.watchersBuckets[bucketID] = append(watchers, w) return true } @@ -231,6 +232,8 @@ type Cacher struct { // Incoming events that should be dispatched to watchers. incoming chan watchCacheEvent + resourcePrefix string + sync.RWMutex // Before accessing the cacher's cache, wait for the ready to be ok. @@ -329,6 +332,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { } objType := reflect.TypeOf(obj) cacher := &Cacher{ + resourcePrefix: config.ResourcePrefix, ready: newReady(), storage: config.Storage, objectType: objType, @@ -342,7 +346,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { }, // TODO: Figure out the correct value for the buffer size. incoming: make(chan watchCacheEvent, 100), - dispatchTimeoutBudget: newTimeBudget(stopCh), + dispatchTimeoutBudget: newTimeBudget(), // We need to (potentially) stop both: // - wait.Until go-routine // - reflector.ListAndWatch @@ -512,13 +516,6 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions return newErrWatcher(err), nil } - // With some events already sent, update resourceVersion so that - // events that were buffered and not yet processed won't be delivered - // to this watcher second time causing going back in time. 
- if len(initEvents) > 0 { - watchRV = initEvents[len(initEvents)-1].ResourceVersion - } - func() { c.Lock() defer c.Unlock() @@ -533,7 +530,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions c.watcherIdx++ }() - go watcher.process(ctx, initEvents, watchRV) + go watcher.processEvents(ctx, initEvents, watchRV) return watcher, nil } @@ -593,6 +590,8 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o return nil } +// NOTICE: Keep in sync with shouldListFromStorage function in +// staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go func shouldDelegateList(opts storage.ListOptions) bool { resourceVersion := opts.ResourceVersion pred := opts.Predicate @@ -716,7 +715,7 @@ func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, } filter := filterWithAttrsFunction(key, pred) - objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace) + objs, readResourceVersion, indexUsed, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace) if err != nil { return err } @@ -742,6 +741,7 @@ func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, return err } } + metrics.RecordListCacheMetrics(c.resourcePrefix, indexUsed, len(objs), listVal.Len()) return nil } @@ -927,8 +927,11 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) { timeout := c.dispatchTimeoutBudget.takeAvailable() c.timer.Reset(timeout) - // Make sure every watcher will try to send event without blocking first, - // even if the timer has already expired. + // Send event to all blocked watchers. As long as timer is running, + // `add` will wait for the watcher to unblock. After timeout, + // `add` will not wait, but immediately close a still blocked watcher. + // Hence, every watcher gets the chance to unblock itself while timer + // is running, not only the first ones in the list. timer := c.timer for _, watcher := range c.blockedWatchers { if !watcher.add(event, timer) { @@ -1382,7 +1385,7 @@ func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) { } } -func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) { +func (c *cacheWatcher) processEvents(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) { defer utilruntime.HandleCrash() // Check how long we are processing initEvents. @@ -1403,15 +1406,25 @@ func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEven for _, event := range initEvents { c.sendWatchCacheEvent(event) } + objType := c.objectType.String() if len(initEvents) > 0 { initCounter.WithLabelValues(objType).Add(float64(len(initEvents))) + // With some events already sent, update resourceVersion + // so that events that were buffered and not yet processed + // won't be delivered to this watcher second time causing + // going back in time. + resourceVersion = initEvents[len(initEvents)-1].ResourceVersion } processingTime := time.Since(startTime) if processingTime > initProcessThreshold { klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", len(initEvents), objType, c.identifier, processingTime) } + c.process(ctx, resourceVersion) +} + +func (c *cacheWatcher) process(ctx context.Context, resourceVersion uint64) { // At this point we already start processing incoming watch events. 
// However, the init event can still be processed because their serialization // and sending to the client happens asynchrnously. diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/OWNERS b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/OWNERS new file mode 100644 index 000000000..b26e7a4dc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-instrumentation-approvers +reviewers: + - sig-instrumentation-reviewers +labels: + - sig/instrumentation diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go new file mode 100644 index 000000000..652730332 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/metrics/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + listCacheCount = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_cache_list_total", + Help: "Number of LIST requests served from watch cache", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource_prefix", "index"}, + ) + listCacheNumFetched = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_cache_list_fetched_objects_total", + Help: "Number of objects read from watch cache in the course of serving a LIST request", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource_prefix", "index"}, + ) + listCacheNumReturned = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_cache_list_returned_objects_total", + Help: "Number of objects returned for a LIST request from watch cache", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource_prefix"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + // Register the metrics. 
+ registerMetrics.Do(func() { + legacyregistry.MustRegister(listCacheCount) + legacyregistry.MustRegister(listCacheNumFetched) + legacyregistry.MustRegister(listCacheNumReturned) + }) +} + +// RecordListCacheMetrics notes various metrics of the cost to serve a LIST request +func RecordListCacheMetrics(resourcePrefix, indexName string, numFetched, numReturned int) { + listCacheCount.WithLabelValues(resourcePrefix, indexName).Inc() + listCacheNumFetched.WithLabelValues(resourcePrefix, indexName).Add(float64(numFetched)) + listCacheNumReturned.WithLabelValues(resourcePrefix).Add(float64(numReturned)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go index 2eb0fed32..da77bd42b 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/time_budget.go @@ -19,6 +19,8 @@ package cacher import ( "sync" "time" + + "k8s.io/utils/clock" ) const ( @@ -46,42 +48,39 @@ type timeBudget interface { type timeBudgetImpl struct { sync.Mutex - budget time.Duration - - refresh time.Duration + clock clock.Clock + budget time.Duration maxBudget time.Duration + refresh time.Duration + // last store last access time + last time.Time } -func newTimeBudget(stopCh <-chan struct{}) timeBudget { +func newTimeBudget() timeBudget { result := &timeBudgetImpl{ + clock: clock.RealClock{}, budget: time.Duration(0), refresh: refreshPerSecond, maxBudget: maxBudget, } - go result.periodicallyRefresh(stopCh) + result.last = result.clock.Now() return result } -func (t *timeBudgetImpl) periodicallyRefresh(stopCh <-chan struct{}) { - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - for { - select { - case <-ticker.C: - t.Lock() - if t.budget = t.budget + t.refresh; t.budget > t.maxBudget { - t.budget = t.maxBudget - } - t.Unlock() - case <-stopCh: - return - } - } -} - func (t *timeBudgetImpl) takeAvailable() time.Duration { t.Lock() defer t.Unlock() + // budget accumulated since last access + now := t.clock.Now() + acc := now.Sub(t.last).Seconds() * t.refresh.Seconds() + if acc < 0 { + acc = 0 + } + // update current budget and store the current time + if t.budget = t.budget + time.Duration(acc*1e9); t.budget > t.maxBudget { + t.budget = t.maxBudget + } + t.last = now result := t.budget t.budget = time.Duration(0) return result @@ -94,6 +93,8 @@ func (t *timeBudgetImpl) returnUnused(unused time.Duration) { // We used more than allowed. return } + // add the unused time directly to the budget + // takeAvailable() will take into account the elapsed time if t.budget = t.budget + unused; t.budget > t.maxBudget { t.budget = t.maxBudget } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index b3c925e18..f52bdd053 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -27,11 +27,11 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/storage" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + "k8s.io/utils/clock" utiltrace "k8s.io/utils/trace" ) @@ -420,17 +420,27 @@ func (w *watchCache) List() []interface{} { // You HAVE TO explicitly call w.RUnlock() after this function. 
func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utiltrace.Trace) error { startTime := w.clock.Now() - go func() { - // Wake us up when the time limit has expired. The docs - // promise that time.After (well, NewTimer, which it calls) - // will wait *at least* the duration given. Since this go - // routine starts sometime after we record the start time, and - // it will wake up the loop below sometime after the broadcast, - // we don't need to worry about waking it up before the time - // has expired accidentally. - <-w.clock.After(blockTimeout) - w.cond.Broadcast() - }() + + // In case resourceVersion is 0, we accept arbitrarily stale result. + // As a result, the condition in the below for loop will never be + // satisfied (w.resourceVersion is never negative), this call will + // never hit the w.cond.Wait(). + // As a result - we can optimize the code by not firing the wakeup + // function (and avoid starting a gorotuine), especially given that + // resourceVersion=0 is the most common case. + if resourceVersion > 0 { + go func() { + // Wake us up when the time limit has expired. The docs + // promise that time.After (well, NewTimer, which it calls) + // will wait *at least* the duration given. Since this go + // routine starts sometime after we record the start time, and + // it will wake up the loop below sometime after the broadcast, + // we don't need to worry about waking it up before the time + // has expired accidentally. + <-w.clock.After(blockTimeout) + w.cond.Broadcast() + }() + } w.RLock() if trace != nil { @@ -449,12 +459,13 @@ func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utilt return nil } -// WaitUntilFreshAndList returns list of pointers to objects. -func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, error) { +// WaitUntilFreshAndList returns list of pointers to `storeElement` objects along +// with their ResourceVersion and the name of the index, if any, that was used. +func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, string, error) { err := w.waitUntilFreshAndBlock(resourceVersion, trace) defer w.RUnlock() if err != nil { - return nil, 0, err + return nil, 0, "", err } // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only @@ -463,10 +474,10 @@ func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues [ // TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible. for _, matchValue := range matchValues { if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil { - return result, w.resourceVersion, nil + return result, w.resourceVersion, matchValue.IndexName, nil } } - return w.store.List(), w.resourceVersion, nil + return w.store.List(), w.resourceVersion, "", nil } // WaitUntilFreshAndGet returns a pointers to object. 
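The time_budget.go change above replaces the once-per-second refresh goroutine with lazy accumulation: takeAvailable now tops the budget up in proportion to the wall-clock time elapsed since the last call, caps it at maxBudget, and hands the whole amount out. The following standalone sketch mirrors that accounting; the type name, the constant values, and the use of real time instead of the injected clock.Clock are illustrative only, not part of the patch.

package main

import (
	"fmt"
	"sync"
	"time"
)

// Illustrative constants; the real refreshPerSecond/maxBudget values are
// defined at the top of time_budget.go.
const (
	refreshPerSecond = 50 * time.Millisecond // budget earned per second of elapsed time
	maxBudget        = 100 * time.Millisecond
)

// lazyBudget mirrors timeBudgetImpl after this patch: no ticker goroutine,
// the budget is replenished on demand from the time elapsed since last use.
type lazyBudget struct {
	mu     sync.Mutex
	budget time.Duration
	last   time.Time // last access time
}

func newLazyBudget() *lazyBudget {
	return &lazyBudget{last: time.Now()}
}

// takeAvailable returns everything accumulated so far and resets the budget,
// matching the accounting in timeBudgetImpl.takeAvailable.
func (b *lazyBudget) takeAvailable() time.Duration {
	b.mu.Lock()
	defer b.mu.Unlock()
	now := time.Now()
	// budget accumulated since the last access, capped at maxBudget
	acc := now.Sub(b.last).Seconds() * refreshPerSecond.Seconds()
	if acc < 0 {
		acc = 0
	}
	if b.budget = b.budget + time.Duration(acc*1e9); b.budget > maxBudget {
		b.budget = maxBudget
	}
	b.last = now
	out := b.budget
	b.budget = 0
	return out
}

func main() {
	b := newLazyBudget()
	time.Sleep(200 * time.Millisecond)
	// Roughly 10ms: 0.2s elapsed at 50ms earned per second.
	fmt.Println("available:", b.takeAvailable())
}

The steady-state refill rate is the same as the old ticker's, but an idle cacher no longer needs a background goroutine or a stop channel just to top up the counter, which is why newTimeBudget() loses its stopCh parameter in the hunk above.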
@@ -621,12 +632,6 @@ func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*w return result, nil } -func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]*watchCacheEvent, error) { - w.RLock() - defer w.RUnlock() - return w.GetAllEventsSinceThreadUnsafe(resourceVersion) -} - func (w *watchCache) Resync() error { // Nothing to do return nil diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index c10eb273e..dc9917cef 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -85,6 +85,38 @@ var ( }, []string{}, ) + listStorageCount = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_storage_list_total", + Help: "Number of LIST requests served from storage", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + listStorageNumFetched = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_storage_list_fetched_objects_total", + Help: "Number of objects read from storage in the course of serving a LIST request", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + listStorageNumSelectorEvals = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_storage_list_evaluated_objects_total", + Help: "Number of objects tested in the course of serving a LIST request from storage", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) + listStorageNumReturned = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_storage_list_returned_objects_total", + Help: "Number of objects returned for a LIST request from storage", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"resource"}, + ) ) var registerMetrics sync.Once @@ -99,6 +131,10 @@ func Register() { legacyregistry.MustRegister(dbTotalSize) legacyregistry.MustRegister(etcdBookmarkCounts) legacyregistry.MustRegister(etcdLeaseObjectCounts) + legacyregistry.MustRegister(listStorageCount) + legacyregistry.MustRegister(listStorageNumFetched) + legacyregistry.MustRegister(listStorageNumSelectorEvals) + legacyregistry.MustRegister(listStorageNumReturned) }) } @@ -139,3 +175,11 @@ func UpdateLeaseObjectCount(count int64) { // See pkg/storage/etcd3/lease_manager.go etcdLeaseObjectCounts.WithLabelValues().Observe(float64(count)) } + +// RecordListEtcd3Metrics notes various metrics of the cost to serve a LIST request +func RecordStorageListMetrics(resource string, numFetched, numEvald, numReturned int) { + listStorageCount.WithLabelValues(resource).Inc() + listStorageNumFetched.WithLabelValues(resource).Add(float64(numFetched)) + listStorageNumSelectorEvals.WithLabelValues(resource).Add(float64(numEvald)) + listStorageNumReturned.WithLabelValues(resource).Add(float64(numReturned)) +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 5f36b7108..37d411c77 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -33,6 +33,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/conversion" @@ -64,14 +65,16 @@ func (d 
authenticatedDataString) AuthenticatedData() []byte { var _ value.Context = authenticatedDataString("") type store struct { - client *clientv3.Client - codec runtime.Codec - versioner storage.Versioner - transformer value.Transformer - pathPrefix string - watcher *watcher - pagingEnabled bool - leaseManager *leaseManager + client *clientv3.Client + codec runtime.Codec + versioner storage.Versioner + transformer value.Transformer + pathPrefix string + groupResource schema.GroupResource + groupResourceString string + watcher *watcher + pagingEnabled bool + leaseManager *leaseManager } type objState struct { @@ -83,11 +86,11 @@ type objState struct { } // New returns an etcd3 implementation of storage.Interface. -func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface { - return newStore(c, codec, newFunc, prefix, transformer, pagingEnabled, leaseManagerConfig) +func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface { + return newStore(c, codec, newFunc, prefix, groupResource, transformer, pagingEnabled, leaseManagerConfig) } -func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store { +func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store { versioner := APIObjectVersioner{} result := &store{ client: c, @@ -98,9 +101,11 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Ob // for compatibility with etcd2 impl. // no-op for default prefix of '/registry'. // keeps compatibility with etcd2 impl for custom prefixes that don't start with '/' - pathPrefix: path.Join("/", prefix), - watcher: newWatcher(c, codec, newFunc, versioner, transformer), - leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), + pathPrefix: path.Join("/", prefix), + groupResource: groupResource, + groupResourceString: groupResource.String(), + watcher: newWatcher(c, codec, newFunc, versioner, transformer), + leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), } return result } @@ -230,12 +235,22 @@ func (s *store) conditionalDelete( } // It's possible we're working with stale data. + // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } @@ -246,12 +261,22 @@ func (s *store) conditionalDelete( } // It's possible we're working with stale data. 
+ // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } @@ -345,12 +370,22 @@ func (s *store) GuaranteedUpdate( } // It's possible we were working with stale data + // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } @@ -694,6 +729,14 @@ func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, var lastKey []byte var hasMore bool var getResp *clientv3.GetResponse + var numFetched int + var numEvald int + // Because these metrics are for understanding the costs of handling LIST requests, + // get them recorded even in error cases. + defer func() { + numReturn := v.Len() + metrics.RecordStorageListMetrics(s.groupResourceString, numFetched, numEvald, numReturn) + }() for { startTime := time.Now() getResp, err = s.client.KV.Get(ctx, key, options...) @@ -701,6 +744,7 @@ func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, if err != nil { return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) } + numFetched += len(getResp.Kvs) if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { return err } @@ -719,7 +763,7 @@ func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, } // take items from the response until the bucket is full, filtering as we go - for _, kv := range getResp.Kvs { + for i, kv := range getResp.Kvs { if paging && int64(v.Len()) >= pred.Limit { hasMore = true break @@ -734,6 +778,10 @@ func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { return err } + numEvald++ + + // free kv early. Long lists can take O(seconds) to decode. + getResp.Kvs[i] = nil } // indicate to the client which resource version was returned diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index db72d9155..176e52a59 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -75,11 +75,11 @@ type ResponseMeta struct { } // IndexerFunc is a function that for a given object computes -// <value for an index> for a particular <index name>. +// `<value for an index>` for a particular `<index name>`. type IndexerFunc func(obj runtime.Object) string -// IndexerFuncs is a mapping from <index name> to function that -// for a given object computes <value for an index>. +// IndexerFuncs is a mapping from `<index name>` to function that -// for a given object computes <value for an index>. +// for a given object computes `<value for an index>`. type IndexerFuncs map[string]IndexerFunc // Everything accepts all objects. @@ -88,7 +88,7 @@ var Everything = SelectionPredicate{ Field: fields.Everything(), } -// MatchValue defines a pair (<index name>, <value for that index>). +// MatchValue defines a pair (`<index name>`, `<value for that index>`).
type MatchValue struct { IndexName string Value string diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 45cfd2ea1..aa4163877 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -21,9 +21,11 @@ import ( "go.opentelemetry.io/otel/trace" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/server/egressselector" "k8s.io/apiserver/pkg/storage/etcd3" "k8s.io/apiserver/pkg/storage/value" + flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" ) const ( @@ -84,6 +86,27 @@ type Config struct { HealthcheckTimeout time.Duration LeaseManagerConfig etcd3.LeaseManagerConfig + + // StorageObjectCountTracker is used to keep track of the total + // number of objects in the storage per resource. + StorageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker +} + +// ConfigForResource is a Config specialized to a particular `schema.GroupResource` +type ConfigForResource struct { + // Config is the resource-independent configuration + Config + + // GroupResource is the relevant one + GroupResource schema.GroupResource +} + +// ForResource specializes to the given resource +func (config *Config) ForResource(resource schema.GroupResource) *ConfigForResource { + return &ConfigForResource{ + Config: *config, + GroupResource: resource, + } } func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 74ebea655..63e18699d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -137,8 +137,13 @@ func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) } dialOptions := []grpc.DialOption{ grpc.WithBlock(), // block until the underlying connection is up - grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor), - grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), + // use chained interceptors so that the default (retry and backoff) interceptors are added. + // otherwise they will be overwritten by the metric interceptor. + // + // these optional interceptors will be placed after the default ones. 
+ // which seems to be what we want as the metrics will be collected on each attempt (retry) + grpc.WithChainUnaryInterceptor(grpcprom.UnaryClientInterceptor), + grpc.WithChainStreamInterceptor(grpcprom.StreamClientInterceptor), } if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { tracingOpts := []otelgrpc.Option{ @@ -244,7 +249,7 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration }, nil } -func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { +func newETCD3Storage(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval) if err != nil { return nil, nil, err @@ -276,7 +281,7 @@ func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (st if transformer == nil { transformer = value.IdentityTransformer } - return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil + return etcd3.New(client, c.Codec, newFunc, c.Prefix, c.GroupResource, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go index fb1e2d289..68c45a18f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -28,7 +28,7 @@ import ( type DestroyFunc func() // Create creates a storage backend based on given config. -func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { +func Create(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { switch c.Type { case storagebackend.StorageTypeETCD2: return nil, nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type) diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go index 905523c73..8d2002923 100644 --- a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -19,7 +19,7 @@ package apihelpers import ( "sort" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" ) // SetFlowSchemaCondition sets conditions. diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go index 6497e3fff..1cd59049d 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go @@ -52,6 +52,17 @@ func WatchInitialized(ctx context.Context) { } } +// RequestDelegated informs the priority and fairness dispatcher that +// a given request has been delegated to an aggregated API +// server. No-op when priority and fairness is disabled. +func RequestDelegated(ctx context.Context) { + // The watch initialization signal doesn't traverse request + // boundaries, so we generously fire it as soon as we know + // that the request won't be serviced locally. Safe to call + // for non-watch requests. 
+ WatchInitialized(ctx) +} + // InitializationSignal is an interface that allows sending and handling // initialization signals. type InitializationSignal interface { diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index bd9fd9ad9..3b2b4a173 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -29,12 +29,12 @@ import ( "sync" "time" + "github.com/google/go-cmp/cmp" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -50,10 +50,11 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" - flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta2" ) const timeFmt = "2006-01-02T15:04:05.999" @@ -67,6 +68,14 @@ const timeFmt = "2006-01-02T15:04:05.999" // undesired becomes completely unused, all the config objects are // read and processed as a whole. +// The funcs in this package follow the naming convention that the suffix +// "Locked" means the relevant mutex must be locked at the start of each +// call and will be locked upon return. For a configController, the +// suffix "ReadLocked" stipulates a read lock while just "Locked" +// stipulates a full lock. Absence of either suffix means that either +// (a) the lock must NOT be held at call time and will not be held +// upon return or (b) locking is irrelevant. + // StartFunction begins the process of handling a request. If the // request gets queued then this function uses the given hashValue as // the source of entropy as it shuffle-shards the request into a @@ -82,7 +91,6 @@ type StartFunction func(ctx context.Context, hashValue uint64) (execute bool, af type RequestDigest struct { RequestInfo *request.RequestInfo User user.Info - Width fcrequest.Width } // `*configController` maintains eventual consistency with the API @@ -91,10 +99,11 @@ type RequestDigest struct { // this type and cfgMeal follow the convention that the suffix // "Locked" means that the caller must hold the configController lock. 
type configController struct { - name string // varies in tests of fighting controllers - clock clock.PassiveClock - queueSetFactory fq.QueueSetFactory - obsPairGenerator metrics.TimedObserverPairGenerator + name string // varies in tests of fighting controllers + clock clock.PassiveClock + queueSetFactory fq.QueueSetFactory + reqsObsPairGenerator metrics.RatioedChangeObserverPairGenerator + execSeatsObsGenerator metrics.RatioedChangeObserverGenerator // How this controller appears in an ObjectMeta ManagedFieldsEntry.Manager asFieldManager string @@ -114,7 +123,7 @@ type configController struct { fsLister flowcontrollister.FlowSchemaLister fsInformerSynced cache.InformerSynced - flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface + flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface // serverConcurrencyLimit is the limit on the server's total // number of non-exempt requests being served at once. This comes @@ -124,10 +133,25 @@ type configController struct { // requestWaitLimit comes from server configuration. requestWaitLimit time.Duration + // watchTracker implements the necessary WatchTracker interface. + WatchTracker + + // the most recent update attempts, ordered by increasing age. + // Consumer trims to keep only the last minute's worth of entries. + // The controller uses this to limit itself to at most six updates + // to a given FlowSchema in any minute. + // This may only be accessed from the one and only worker goroutine. + mostRecentUpdates []updateAttempt + // This must be locked while accessing flowSchemas or - // priorityLevelStates. It is the lock involved in - // LockingWriteMultiple. - lock sync.Mutex + // priorityLevelStates. A lock for writing is needed + // for writing to any of the following: + // - the flowSchemas field + // - the slice held in the flowSchemas field + // - the priorityLevelStates field + // - the map held in the priorityLevelStates field + // - any field of a priorityLevelState held in that map + lock sync.RWMutex // flowSchemas holds the flow schema objects, sorted by increasing // numerical (decreasing logical) matching precedence. Every @@ -138,16 +162,6 @@ type configController struct { // name to the state for that level. Every name referenced from a // member of `flowSchemas` has an entry here. priorityLevelStates map[string]*priorityLevelState - - // the most recent update attempts, ordered by increasing age. - // Consumer trims to keep only the last minute's worth of entries. - // The controller uses this to limit itself to at most six updates - // to a given FlowSchema in any minute. - // This may only be accessed from the one and only worker goroutine. - mostRecentUpdates []updateAttempt - - // watchTracker implements the necessary WatchTracker interface. 
- WatchTracker } type updateAttempt struct { @@ -178,8 +192,11 @@ type priorityLevelState struct { // returned StartFunction numPending int - // Observers tracking number waiting, executing - obsPair metrics.TimedObserverPair + // Observers tracking number of requests waiting, executing + reqsObsPair metrics.RatioedChangeObserverPair + + // Observer of number of seats occupied throughout execution + execSeatsObs metrics.RatioedChangeObserver } // NewTestableController is extra flexible to facilitate testing @@ -188,7 +205,8 @@ func newTestableController(config TestableConfig) *configController { name: config.Name, clock: config.Clock, queueSetFactory: config.QueueSetFactory, - obsPairGenerator: config.ObsPairGenerator, + reqsObsPairGenerator: config.ReqsObsPairGenerator, + execSeatsObsGenerator: config.ExecSeatsObsGenerator, asFieldManager: config.AsFieldManager, foundToDangling: config.FoundToDangling, serverConcurrencyLimit: config.ServerConcurrencyLimit, @@ -203,7 +221,7 @@ func newTestableController(config TestableConfig) *configController { cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") // ensure the data structure reflects the mandatory config cfgCtlr.lockAndDigestConfigObjects(nil, nil) - fci := config.InformerFactory.Flowcontrol().V1beta1() + fci := config.InformerFactory.Flowcontrol().V1beta2() pli := fci.PriorityLevelConfigurations() fsi := fci.FlowSchemas() cfgCtlr.plLister = pli.Lister() @@ -281,8 +299,8 @@ func (cfgCtlr *configController) MaintainObservations(stopCh <-chan struct{}) { } func (cfgCtlr *configController) updateObservations() { - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() for _, plc := range cfgCtlr.priorityLevelStates { if plc.queues != nil { plc.queues.UpdateObservations() @@ -327,7 +345,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool { func(obj interface{}) { defer cfgCtlr.configQueue.Done(obj) - specificDelay, err := cfgCtlr.syncOne(map[string]string{}) + specificDelay, err := cfgCtlr.syncOne() switch { case err != nil: klog.Error(err) @@ -346,7 +364,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool { // objects that configure API Priority and Fairness and updates the // local configController accordingly. // Only invoke this in the one and only worker goroutine -func (cfgCtlr *configController) syncOne(flowSchemaRVs map[string]string) (specificDelay time.Duration, err error) { +func (cfgCtlr *configController) syncOne() (specificDelay time.Duration, err error) { klog.V(5).Infof("%s syncOne at %s", cfgCtlr.name, cfgCtlr.clock.Now().Format(timeFmt)) all := labels.Everything() newPLs, err := cfgCtlr.plLister.List(all) @@ -357,7 +375,7 @@ func (cfgCtlr *configController) syncOne(flowSchemaRVs map[string]string) (speci if err != nil { return 0, fmt.Errorf("unable to list FlowSchema objects: %w", err) } - return cfgCtlr.digestConfigObjects(newPLs, newFSs, flowSchemaRVs) + return cfgCtlr.digestConfigObjects(newPLs, newFSs) } // cfgMeal is the data involved in the process of digesting the API @@ -398,7 +416,7 @@ type fsStatusUpdate struct { // digestConfigObjects is given all the API objects that configure // cfgCtlr and writes its consequent new configState. 
// Only invoke this in the one and only worker goroutine -func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema, flowSchemaRVs map[string]string) (time.Duration, error) { +func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.PriorityLevelConfiguration, newFSs []*flowcontrol.FlowSchema) (time.Duration, error) { fsStatusUpdates := cfgCtlr.lockAndDigestConfigObjects(newPLs, newFSs) var errs []error currResult := updateAttempt{ @@ -417,26 +435,26 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior // if we are going to issue an update, be sure we track every name we update so we know if we update it too often. currResult.updatedItems.Insert(fsu.flowSchema.Name) - - enc, err := json.Marshal(fsu.condition) + patchBytes, err := makeFlowSchemaConditionPatch(fsu.condition) if err != nil { // should never happen because these conditions are created here and well formed panic(fmt.Sprintf("Failed to json.Marshall(%#+v): %s", fsu.condition, err.Error())) } - klog.V(4).Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s", cfgCtlr.name, string(enc), fsu.flowSchema.Name, fsu.flowSchema.ResourceVersion, fcfmt.Fmt(fsu.oldValue)) + if klog.V(4).Enabled() { + klog.V(4).Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s, diff: %s", + cfgCtlr.name, fsu.condition, fsu.flowSchema.Name, fsu.flowSchema.ResourceVersion, fcfmt.Fmt(fsu.oldValue), cmp.Diff(fsu.oldValue, fsu.condition)) + } fsIfc := cfgCtlr.flowcontrolClient.FlowSchemas() - patchBytes := []byte(fmt.Sprintf(`{"status": {"conditions": [ %s ] } }`, string(enc))) patchOptions := metav1.PatchOptions{FieldManager: cfgCtlr.asFieldManager} - patchedFlowSchema, err := fsIfc.Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, patchBytes, patchOptions, "status") - if err == nil { - key, _ := cache.MetaNamespaceKeyFunc(patchedFlowSchema) - flowSchemaRVs[key] = patchedFlowSchema.ResourceVersion - } else if apierrors.IsNotFound(err) { - // This object has been deleted. A notification is coming - // and nothing more needs to be done here. - klog.V(5).Infof("%s at %s: attempted update of concurrently deleted FlowSchema %s; nothing more needs to be done", cfgCtlr.name, cfgCtlr.clock.Now().Format(timeFmt), fsu.flowSchema.Name) - } else { - errs = append(errs, fmt.Errorf("failed to set a status.condition for FlowSchema %s: %w", fsu.flowSchema.Name, err)) + _, err = fsIfc.Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, patchBytes, patchOptions, "status") + if err != nil { + if apierrors.IsNotFound(err) { + // This object has been deleted. A notification is coming + // and nothing more needs to be done here. + klog.V(5).Infof("%s at %s: attempted update of concurrently deleted FlowSchema %s; nothing more needs to be done", cfgCtlr.name, cfgCtlr.clock.Now().Format(timeFmt), fsu.flowSchema.Name) + } else { + errs = append(errs, fmt.Errorf("failed to set a status.condition for FlowSchema %s: %w", fsu.flowSchema.Name, err)) + } } } cfgCtlr.addUpdateResult(currResult) @@ -444,6 +462,20 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior return suggestedDelay, utilerrors.NewAggregate(errs) } +// makeFlowSchemaConditionPatch takes in a condition and returns the patch status as a json. 
+func makeFlowSchemaConditionPatch(condition flowcontrol.FlowSchemaCondition) ([]byte, error) { + o := struct { + Status flowcontrol.FlowSchemaStatus `json:"status"` + }{ + Status: flowcontrol.FlowSchemaStatus{ + Conditions: []flowcontrol.FlowSchemaCondition{ + condition, + }, + }, + } + return json.Marshal(o) +} + // shouldDelayUpdate checks to see if a flowschema has been updated too often and returns true if a delay is needed. // Only invoke this in the one and only worker goroutine func (cfgCtlr *configController) shouldDelayUpdate(flowSchemaName string) bool { @@ -506,9 +538,10 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi for _, pl := range newPLs { state := meal.cfgCtlr.priorityLevelStates[pl.Name] if state == nil { - state = &priorityLevelState{obsPair: meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{pl.Name})} + labelValues := []string{pl.Name} + state = &priorityLevelState{reqsObsPair: meal.cfgCtlr.reqsObsPairGenerator.Generate(1, 1, labelValues), execSeatsObs: meal.cfgCtlr.execSeatsObsGenerator.Generate(1, 1, labelValues)} } - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.obsPair) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.reqsObsPair, state.execSeatsObs) if err != nil { klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) continue @@ -611,7 +644,7 @@ func (meal *cfgMeal) processOldPLsLocked() { } } var err error - plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.obsPair) + plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsObsPair, plState.execSeatsObs) if err != nil { // This can not happen because queueSetCompleterForPL already approved this config panic(fmt.Sprintf("%s from name=%q spec=%s", err, plName, fcfmt.Fmt(plState.pl.Spec))) @@ -660,7 +693,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // given priority level configuration. Returns nil if that config // does not call for limiting. Returns nil and an error if the given // object is malformed in a way that is a problem for this package. 
-func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, intPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedChangeObserverPair, execSeatsObs metrics.RatioedChangeObserver) (fq.QueueSetCompleter, error) { if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Spec.Limited == nil) { return nil, errors.New("broken union structure at the top") } @@ -689,7 +722,7 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow if queues != nil { qsc, err = queues.BeginConfigChange(qcQS) } else { - qsc, err = qsf.BeginConstruction(qcQS, intPair) + qsc, err = qsf.BeginConstruction(qcQS, reqsIntPair, execSeatsObs) } if err != nil { err = fmt.Errorf("priority level %q has QueuingConfiguration %#+v, which is invalid: %w", pl.Name, qcAPI, err) @@ -734,17 +767,20 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl // that does not actually exist (right now) as a real API object. func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) - obsPair := meal.cfgCtlr.obsPairGenerator.Generate(1, 1, []string{proto.Name}) - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, obsPair) + labelValues := []string{proto.Name} + reqsObsPair := meal.cfgCtlr.reqsObsPairGenerator.Generate(1, 1, labelValues) + execSeatsObs := meal.cfgCtlr.execSeatsObsGenerator.Generate(1, 1, labelValues) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, reqsObsPair, execSeatsObs) if err != nil { // This can not happen because proto is one of the mandatory // objects and these are not erroneous panic(err) } meal.newPLStates[proto.Name] = &priorityLevelState{ - pl: proto, - qsCompleter: qsCompleter, - obsPair: obsPair, + pl: proto, + qsCompleter: qsCompleter, + reqsObsPair: reqsObsPair, + execSeatsObs: execSeatsObs, } if proto.Spec.Limited != nil { meal.shareSum += float64(proto.Spec.Limited.AssuredConcurrencyShares) @@ -763,10 +799,10 @@ func (immediateRequest) Finish(execute func()) bool { // The returned bool indicates whether the request is exempt from // limitation. The startWaitingTime is when the request started // waiting in its queue, or `Time{}` if this did not happen. 
-func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDigest, queueNoteFn fq.QueueNoteFn) (fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, isExempt bool, req fq.Request, startWaitingTime time.Time) { +func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDigest, workEstimator func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) fcrequest.WorkEstimate, queueNoteFn fq.QueueNoteFn) (fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, isExempt bool, req fq.Request, startWaitingTime time.Time) { klog.V(7).Infof("startRequest(%#+v)", rd) - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() var selectedFlowSchema, catchAllFlowSchema *flowcontrol.FlowSchema for _, fs := range cfgCtlr.flowSchemas { if matchesFlowSchema(rd, fs) { @@ -807,11 +843,12 @@ func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDig flowDistinguisher = computeFlowDistinguisher(rd, selectedFlowSchema.Spec.DistinguisherMethod) hashValue = hashFlowID(selectedFlowSchema.Name, flowDistinguisher) } + workEstimate := workEstimator(selectedFlowSchema, plState.pl, flowDistinguisher) startWaitingTime = time.Now() klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, numQueues=%d", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName, numQueues) - req, idle := plState.queues.StartRequest(ctx, &rd.Width, hashValue, flowDistinguisher, selectedFlowSchema.Name, rd.RequestInfo, rd.User, queueNoteFn) + req, idle := plState.queues.StartRequest(ctx, &workEstimate, hashValue, flowDistinguisher, selectedFlowSchema.Name, rd.RequestInfo, rd.User, queueNoteFn) if idle { - cfgCtlr.maybeReapLocked(plName, plState) + cfgCtlr.maybeReapReadLocked(plName, plState) } return selectedFlowSchema, plState.pl, false, req, startWaitingTime } @@ -820,8 +857,8 @@ func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDig // priority level if it has no more use. Call this after getting a // clue that the given priority level is undesired and idle. func (cfgCtlr *configController) maybeReap(plName string) { - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() plState := cfgCtlr.priorityLevelStates[plName] if plState == nil { klog.V(7).Infof("plName=%s, plState==nil", plName) @@ -843,7 +880,7 @@ func (cfgCtlr *configController) maybeReap(plName string) { // it has no more use. Call this if both (1) plState.queues is // non-nil and reported being idle, and (2) cfgCtlr's lock has not // been released since then. 
-func (cfgCtlr *configController) maybeReapLocked(plName string, plState *priorityLevelState) { +func (cfgCtlr *configController) maybeReapReadLocked(plName string, plState *priorityLevelState) { if !(plState.quiescing && plState.numPending == 0) { return } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go index 3c2c9fc74..91c49a4be 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go @@ -102,7 +102,11 @@ func (cfgCtlr *configController) dumpQueues(w http.ResponseWriter, r *http.Reque "Index", // 2 "PendingRequests", // 3 "ExecutingRequests", // 4 - "VirtualStart", // 5 + "SeatsInUse", // 5 + "NextDispatchR", // 6 + "InitialSeatsSum", // 7 + "MaxSeatsSum", // 8 + "TotalWorkSum", // 9 } tabPrint(tabWriter, rowForHeaders(columnHeaders)) endLine(tabWriter) @@ -114,18 +118,26 @@ func (cfgCtlr *configController) dumpQueues(w http.ResponseWriter, r *http.Reque "", // 3 "", // 4 "", // 5 + "", // 6 + "", // 7 + "", // 8 + "", // 9 )) endLine(tabWriter) continue } queueSetDigest := plState.queues.Dump(false) for i, q := range queueSetDigest.Queues { - tabPrint(tabWriter, rowForQueue( - plState.pl.Name, // 1 - i, // 2 - len(q.Requests), // 3 - q.ExecutingRequests, // 4 - q.VirtualStart, // 5 + tabPrint(tabWriter, row( + plState.pl.Name, // 1 - "PriorityLevelName" + strconv.Itoa(i), // 2 - "Index" + strconv.Itoa(len(q.Requests)), // 3 - "PendingRequests" + strconv.Itoa(q.ExecutingRequests), // 4 - "ExecutingRequests" + strconv.Itoa(q.SeatsInUse), // 5 - "SeatsInUse" + q.NextDispatchR, // 6 - "NextDispatchR" + strconv.Itoa(q.QueueSum.InitialSeatsSum), // 7 - "InitialSeatsSum" + strconv.Itoa(q.QueueSum.MaxSeatsSum), // 8 - "MaxSeatsSum" + q.QueueSum.TotalWorkSum, // 9 - "TotalWorkSum" )) endLine(tabWriter) } @@ -147,18 +159,21 @@ func (cfgCtlr *configController) dumpRequests(w http.ResponseWriter, r *http.Req "RequestIndexInQueue", // 4 "FlowDistingsher", // 5 "ArriveTime", // 6 + "InitialSeats", // 7 + "FinalSeats", // 8 + "AdditionalLatency", // 9 })) if includeRequestDetails { continueLine(tabWriter) tabPrint(tabWriter, rowForHeaders([]string{ - "UserName", // 7 - "Verb", // 8 - "APIPath", // 9 - "Namespace", // 10 - "Name", // 11 - "APIVersion", // 12 - "Resource", // 13 - "SubResource", // 14 + "UserName", // 10 + "Verb", // 11 + "APIPath", // 12 + "Namespace", // 13 + "Name", // 14 + "APIVersion", // 15 + "Resource", // 16 + "SubResource", // 17 })) } endLine(tabWriter) @@ -169,28 +184,31 @@ func (cfgCtlr *configController) dumpRequests(w http.ResponseWriter, r *http.Req queueSetDigest := plState.queues.Dump(includeRequestDetails) for iq, q := range queueSetDigest.Queues { for ir, r := range q.Requests { - tabPrint(tabWriter, rowForRequest( + tabPrint(tabWriter, row( plState.pl.Name, // 1 r.MatchedFlowSchema, // 2 - iq, // 3 - ir, // 4 + strconv.Itoa(iq), // 3 + strconv.Itoa(ir), // 4 r.FlowDistinguisher, // 5 - r.ArriveTime, // 6 + r.ArriveTime.UTC().Format(time.RFC3339Nano), // 6 + strconv.Itoa(int(r.WorkEstimate.InitialSeats)), // 7 + strconv.Itoa(int(r.WorkEstimate.FinalSeats)), // 8 + r.WorkEstimate.AdditionalLatency.String(), // 9 )) if includeRequestDetails { continueLine(tabWriter) tabPrint(tabWriter, rowForRequestDetails( - r.UserName, // 7 - r.RequestInfo.Verb, // 8 - r.RequestInfo.Path, // 9 - r.RequestInfo.Namespace, // 10 - r.RequestInfo.Name, // 11 + r.UserName, // 10 + 
r.RequestInfo.Verb, // 11 + r.RequestInfo.Path, // 12 + r.RequestInfo.Namespace, // 13 + r.RequestInfo.Name, // 14 schema.GroupVersion{ Group: r.RequestInfo.APIGroup, Version: r.RequestInfo.APIVersion, - }.String(), // 12 - r.RequestInfo.Resource, // 13 - r.RequestInfo.Subresource, // 14 + }.String(), // 15 + r.RequestInfo.Resource, // 16 + r.RequestInfo.Subresource, // 17 )) } endLine(tabWriter) @@ -229,27 +247,6 @@ func rowForPriorityLevel(plName string, activeQueues int, isIdle, isQuiescing bo ) } -func rowForQueue(plName string, index, waitingRequests, executingRequests int, virtualStart float64) string { - return row( - plName, - strconv.Itoa(index), - strconv.Itoa(waitingRequests), - strconv.Itoa(executingRequests), - fmt.Sprintf("%.4f", virtualStart), - ) -} - -func rowForRequest(plName, fsName string, queueIndex, requestIndex int, flowDistinguisher string, arriveTime time.Time) string { - return row( - plName, - fsName, - strconv.Itoa(queueIndex), - strconv.Itoa(requestIndex), - flowDistinguisher, - arriveTime.UTC().Format(time.RFC3339Nano), - ) -} - func rowForRequestDetails(username, verb, path, namespace, name, apiVersion, resource, subResource string) string { return row( username, diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 8d914e796..67149fd83 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -21,17 +21,18 @@ import ( "strconv" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/server/mux" - "k8s.io/apiserver/pkg/util/flowcontrol/counter" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock" fqs "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" kubeinformers "k8s.io/client-go/informers" "k8s.io/klog/v2" + "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" ) // ConfigConsumerAsFieldManager is how the config consuminng @@ -41,8 +42,9 @@ const ConfigConsumerAsFieldManager = "api-priority-and-fairness-config-consumer- // Interface defines how the API Priority and Fairness filter interacts with the underlying system. type Interface interface { // Handle takes care of queuing and dispatching a request - // characterized by the given digest. The given `noteFn` will be - // invoked with the results of request classification. If the + // characterized by the given digest. The given `workEstimator` will be + // invoked with the results of request classification and must return the + // work parameters for the request. If the // request is queued then `queueNoteFn` will be called twice, // first with `true` and then with `false`; otherwise // `queueNoteFn` will not be called at all. If Handle decides @@ -53,7 +55,7 @@ type Interface interface { // ctx is cancelled or times out. 
Handle(ctx context.Context, requestDigest RequestDigest, - noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + workEstimator func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) fcrequest.WorkEstimate, queueNoteFn fq.QueueNoteFn, execFn func(), ) @@ -78,12 +80,11 @@ type Interface interface { // New creates a new instance to implement API priority and fairness func New( informerFactory kubeinformers.SharedInformerFactory, - flowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface, serverConcurrencyLimit int, requestWaitLimit time.Duration, ) Interface { - grc := counter.NoOp{} - clk := clock.RealClock{} + clk := eventclock.Real{} return NewTestable(TestableConfig{ Name: "Controller", Clock: clk, @@ -93,8 +94,9 @@ func New( FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: serverConcurrencyLimit, RequestWaitLimit: requestWaitLimit, - ObsPairGenerator: metrics.PriorityLevelConcurrencyObserverPairGenerator, - QueueSetFactory: fqs.NewQueueSetFactory(clk, grc), + ReqsObsPairGenerator: metrics.PriorityLevelConcurrencyObserverPairGenerator, + ExecSeatsObsGenerator: metrics.PriorityLevelExecutionSeatsObserverGenerator, + QueueSetFactory: fqs.NewQueueSetFactory(clk), }) } @@ -124,7 +126,7 @@ type TestableConfig struct { InformerFactory kubeinformers.SharedInformerFactory // FlowcontrolClient to use for manipulating config objects - FlowcontrolClient flowcontrolclient.FlowcontrolV1beta1Interface + FlowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int @@ -132,8 +134,11 @@ type TestableConfig struct { // RequestWaitLimit configured on the server RequestWaitLimit time.Duration - // ObsPairGenerator for metrics - ObsPairGenerator metrics.TimedObserverPairGenerator + // ObsPairGenerator for metrics about requests + ReqsObsPairGenerator metrics.RatioedChangeObserverPairGenerator + + // RatioedChangeObserverPairGenerator for metrics about seats occupied by all phases of execution + ExecSeatsObsGenerator metrics.RatioedChangeObserverGenerator // QueueSetFactory for the queuing implementation QueueSetFactory fq.QueueSetFactory @@ -145,12 +150,11 @@ func NewTestable(config TestableConfig) Interface { } func (cfgCtlr *configController) Handle(ctx context.Context, requestDigest RequestDigest, - noteFn func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration), + workEstimator func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) fcrequest.WorkEstimate, queueNoteFn fq.QueueNoteFn, execFn func()) { - fs, pl, isExempt, req, startWaitingTime := cfgCtlr.startRequest(ctx, requestDigest, queueNoteFn) + fs, pl, isExempt, req, startWaitingTime := cfgCtlr.startRequest(ctx, requestDigest, workEstimator, queueNoteFn) queued := startWaitingTime != time.Time{} - noteFn(fs, pl) if req == nil { if queued { metrics.ObserveWaitingDuration(ctx, pl.Name, fs.Name, strconv.FormatBool(req != nil), time.Since(startWaitingTime)) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go deleted file mode 100644 index 0418e1217..000000000 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/interface.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package counter - -// GoRoutineCounter keeps track of the number of active goroutines -// working on/for something. This is a utility that makes such code more -// testable. The code uses this utility to report the number of active -// goroutines to the test code, so that the test code can advance a fake -// clock when and only when the code being tested has finished all -// the work that is ready to do at the present time. -type GoRoutineCounter interface { - // Add adds the given delta to the count of active goroutines. - // Call Add(1) before forking a goroutine, Add(-1) at the end of that goroutine. - // Call Add(-1) just before waiting on something from another goroutine (e.g., - // just before a `select`). - // Call Add(1) just before doing something that unblocks a goroutine that is - // waiting on that something. - Add(delta int) -} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go index 9d50c3e21..b3cdb17bb 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go @@ -20,6 +20,7 @@ import ( "time" "k8s.io/apiserver/pkg/endpoints/request" + flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" ) // QueueSetDump is an instant dump of queue-set. @@ -32,18 +33,26 @@ type QueueSetDump struct { // QueueDump is an instant dump of one queue in a queue-set. type QueueDump struct { + QueueSum QueueSum Requests []RequestDump - VirtualStart float64 + NextDispatchR string ExecutingRequests int SeatsInUse int } +type QueueSum struct { + InitialSeatsSum int + MaxSeatsSum int + TotalWorkSum string +} + // RequestDump is an instant dump of one requests pending in the queue. type RequestDump struct { MatchedFlowSchema string FlowDistinguisher string ArriveTime time.Time StartTime time.Time + WorkEstimate flowcontrolrequest.WorkEstimate // request details UserName string RequestInfo request.RequestInfo diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/interface.go new file mode 100644 index 000000000..58f88b992 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/interface.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package eventclock + +import ( + "time" + + baseclock "k8s.io/utils/clock" +) + +// EventFunc does some work that needs to be done at or after the +// given time. +type EventFunc func(time.Time) + +// EventClock is an active clock abstraction for use in code that is +// testable with a fake clock that itself determines how time may be +// advanced. The timing paradigm is invoking EventFuncs rather than +// synchronizing through channels, so that the fake clock has a handle +// on when associated activity is done. +type Interface interface { + baseclock.PassiveClock + + // Sleep returns after the given duration (or more). + Sleep(d time.Duration) + + // EventAfterDuration invokes the given EventFunc after the given duration (or more), + // passing the time when the invocation was launched. + EventAfterDuration(f EventFunc, d time.Duration) + + // EventAfterTime invokes the given EventFunc at the given time or later, + // passing the time when the invocation was launched. + EventAfterTime(f EventFunc, t time.Time) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/real.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/real.go new file mode 100644 index 000000000..d567a0f45 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock/real.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eventclock + +import ( + "time" + + "k8s.io/utils/clock" +) + +// RealEventClock fires event on real world time +type Real struct { + clock.RealClock +} + +var _ Interface = Real{} + +// EventAfterDuration schedules an EventFunc +func (Real) EventAfterDuration(f EventFunc, d time.Duration) { + ch := time.After(d) + go func() { + t := <-ch + f(t) + }() +} + +// EventAfterTime schedules an EventFunc +func (r Real) EventAfterTime(f EventFunc, t time.Time) { + r.EventAfterDuration(f, time.Until(t)) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go index dcba6f2c2..800fa765f 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go @@ -21,17 +21,16 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" + "k8s.io/utils/clock" ) // Integrator computes the moments of some variable X over time as // read from a particular clock. The integrals start when the // Integrator is created, and ends at the latest operation on the -// Integrator. As a `metrics.TimedObserver` this fixes X1=1 and -// ignores attempts to change X1. +// Integrator. 
type Integrator interface { - metrics.TimedObserver + metrics.ChangeObserver GetResults() IntegratorResults @@ -70,10 +69,7 @@ func NewIntegrator(clock clock.PassiveClock) Integrator { } } -func (igr *integrator) SetX1(x1 float64) { -} - -func (igr *integrator) Set(x float64) { +func (igr *integrator) Observe(x float64) { igr.Lock() igr.setLocked(x) igr.Unlock() diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 2de44c350..cc772d8b7 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -31,8 +31,11 @@ import ( // are separated so that errors from the first phase can be found // before committing to a concurrency allotment for the second. type QueueSetFactory interface { - // BeginConstruction does the first phase of creating a QueueSet - BeginConstruction(QueuingConfig, metrics.TimedObserverPair) (QueueSetCompleter, error) + // BeginConstruction does the first phase of creating a QueueSet. + // The RatioedChangeObserverPair observes number of requests, + // execution covering just the regular phase. + // The RatioedChangeObserver observes number of seats occupied through all phases of execution. + BeginConstruction(QueuingConfig, metrics.RatioedChangeObserverPair, metrics.RatioedChangeObserver) (QueueSetCompleter, error) } // QueueSetCompleter finishes the two-step process of creating or @@ -81,7 +84,7 @@ type QueueSet interface { // was idle at the moment of the return. Otherwise idle==false // and the client must call the Finish method of the Request // exactly once. - StartRequest(ctx context.Context, width *request.Width, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn QueueNoteFn) (req Request, idle bool) + StartRequest(ctx context.Context, width *request.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn QueueNoteFn) (req Request, idle bool) // UpdateObservations makes sure any time-based statistics have // caught up with the current clock reading diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go index 1977f7522..b2e3adbdc 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/interface.go @@ -16,47 +16,15 @@ limitations under the License. package promise -// This file defines interfaces for promises and futures and related -// things. These are about coordination among multiple goroutines and -// so are safe for concurrent calls --- although moderated in some -// cases by a requirement that the caller hold a certain lock. - -// Readable represents a variable that is initially not set and later -// becomes set. Some instances may be set to multiple values in -// series. A Readable for a variable that can only get one value is -// commonly known as a "future". -type Readable interface { - // Get reads the current value of this variable. If this variable - // is not set yet then this call blocks until this variable gets a - // value. +// WriteOnce represents a variable that is initially not set and can +// be set once and is readable. This is the common meaning for +// "promise". 
+type WriteOnce interface { + // Get reads the current value of this variable. If this + // variable is not set yet then this call blocks until this + // variable gets a value. Get() interface{} - // IsSet returns immediately with an indication of whether this - // variable has been set. - IsSet() bool -} - -// LockingReadable is a Readable whose implementation is protected by -// a lock -type LockingReadable interface { - Readable - - // GetLocked is like Get but the caller must already hold the - // lock. GetLocked may release, and later re-acquire, the lock - // any number of times. Get may acquire, and later release, the - // lock any number of times. - GetLocked() interface{} - - // IsSetLocked is like IsSet but the caller must already hold the - // lock. IsSetLocked may release, and later re-acquire, the lock - // any number of times. IsSet may acquire, and later release, the - // lock any number of times. - IsSetLocked() bool -} - -// WriteOnceOnly represents a variable that is initially not set and -// can be set once. -type WriteOnceOnly interface { // Set normally writes a value into this variable, unblocks every // goroutine waiting for this variable to have a value, and // returns true. In the unhappy case that this variable is @@ -64,66 +32,3 @@ type WriteOnceOnly interface { // variable's value. Set(interface{}) bool } - -// WriteOnce represents a variable that is initially not set and can -// be set once and is readable. This is the common meaning for -// "promise". -type WriteOnce interface { - Readable - WriteOnceOnly -} - -// LockingWriteOnceOnly is a WriteOnceOnly whose implementation is -// protected by a lock. -type LockingWriteOnceOnly interface { - WriteOnceOnly - - // SetLocked is like Set but the caller must already hold the - // lock. SetLocked may release, and later re-acquire, the lock - // any number of times. Set may acquire, and later release, the - // lock any number of times - SetLocked(interface{}) bool -} - -// LockingWriteOnce is a WriteOnce whose implementation is protected -// by a lock. -type LockingWriteOnce interface { - LockingReadable - LockingWriteOnceOnly -} - -// WriteMultipleOnly represents a variable that is initially not set -// and can be set one or more times (unlike a traditional "promise", -// which can be written only once). -type WriteMultipleOnly interface { - // Set writes a value into this variable and unblocks every - // goroutine waiting for this variable to have a value - Set(interface{}) -} - -// WriteMultiple represents a variable that is initially not set and -// can be set one or more times (unlike a traditional "promise", which -// can be written only once) and is readable. -type WriteMultiple interface { - Readable - WriteMultipleOnly -} - -// LockingWriteMultipleOnly is a WriteMultipleOnly whose -// implementation is protected by a lock. -type LockingWriteMultipleOnly interface { - WriteMultipleOnly - - // SetLocked is like Set but the caller must already hold the - // lock. SetLocked may release, and later re-acquire, the lock - // any number of times. Set may acquire, and later release, the - // lock any number of times - SetLocked(interface{}) -} - -// LockingWriteMultiple is a WriteMultiple whose implementation is -// protected by a lock. 
-type LockingWriteMultiple interface { - LockingReadable - LockingWriteMultipleOnly -} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go deleted file mode 100644 index db5598f89..000000000 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise/lockingpromise.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lockingpromise - -import ( - "sync" - - "k8s.io/apiserver/pkg/util/flowcontrol/counter" - "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" -) - -// promisoid is the data and behavior common to all the promise-like -// abstractions implemented here. This implementation is based on a -// condition variable. This implementation tracks active goroutines: -// the given counter is decremented for a goroutine waiting for this -// varible to be set and incremented when such a goroutine is -// unblocked. -type promisoid struct { - lock sync.Locker - cond sync.Cond - activeCounter counter.GoRoutineCounter // counter of active goroutines - waitingCount int // number of goroutines idle due to this being unset - isSet bool - value interface{} -} - -func (pr *promisoid) Get() interface{} { - pr.lock.Lock() - defer pr.lock.Unlock() - return pr.GetLocked() -} - -func (pr *promisoid) GetLocked() interface{} { - if !pr.isSet { - pr.waitingCount++ - pr.activeCounter.Add(-1) - pr.cond.Wait() - } - return pr.value -} - -func (pr *promisoid) IsSet() bool { - pr.lock.Lock() - defer pr.lock.Unlock() - return pr.IsSetLocked() -} - -func (pr *promisoid) IsSetLocked() bool { - return pr.isSet -} - -func (pr *promisoid) SetLocked(value interface{}) { - pr.isSet = true - pr.value = value - if pr.waitingCount > 0 { - pr.activeCounter.Add(pr.waitingCount) - pr.waitingCount = 0 - pr.cond.Broadcast() - } -} - -type writeOnce struct { - promisoid -} - -var _ promise.LockingWriteOnce = &writeOnce{} - -// NewWriteOnce makes a new promise.LockingWriteOnce -func NewWriteOnce(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteOnce { - return &writeOnce{promisoid{ - lock: lock, - cond: *sync.NewCond(lock), - activeCounter: activeCounter, - }} -} - -func (wr *writeOnce) Set(value interface{}) bool { - wr.lock.Lock() - defer wr.lock.Unlock() - return wr.SetLocked(value) -} - -func (wr *writeOnce) SetLocked(value interface{}) bool { - if wr.isSet { - return false - } - wr.promisoid.SetLocked(value) - return true -} - -type writeMultiple struct { - promisoid -} - -var _ promise.LockingWriteMultiple = &writeMultiple{} - -// NewWriteMultiple makes a new promise.LockingWriteMultiple -func NewWriteMultiple(lock sync.Locker, activeCounter counter.GoRoutineCounter) promise.LockingWriteMultiple { - return &writeMultiple{promisoid{ - lock: lock, - cond: *sync.NewCond(lock), - activeCounter: activeCounter, - }} -} - -func (wr *writeMultiple) 
Set(value interface{}) { - wr.lock.Lock() - defer wr.lock.Unlock() - wr.SetLocked(value) -} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go new file mode 100644 index 000000000..d3bda40aa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package promise + +import ( + "sync" +) + +// promise implements the WriteOnce interface. +type promise struct { + doneCh <-chan struct{} + doneVal interface{} + setCh chan struct{} + onceler sync.Once + value interface{} +} + +var _ WriteOnce = &promise{} + +// NewWriteOnce makes a new thread-safe WriteOnce. +// +// If `initial` is non-nil then that value is Set at creation time. +// +// If a `Get` is waiting soon after `doneCh` becomes selectable (which +// never happens for the nil channel) then `Set(doneVal)` effectively +// happens at that time. +func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) WriteOnce { + p := &promise{ + doneCh: doneCh, + doneVal: doneVal, + setCh: make(chan struct{}), + } + if initial != nil { + p.Set(initial) + } + return p +} + +func (p *promise) Get() interface{} { + select { + case <-p.setCh: + case <-p.doneCh: + p.Set(p.doneVal) + } + return p.value +} + +func (p *promise) Set(value interface{}) bool { + var ans bool + p.onceler.Do(func() { + p.value = value + close(p.setCh) + ans = true + }) + return ans +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/fifo_list.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/fifo_list.go index 5c7e7acf5..eb56e1e94 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/fifo_list.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/fifo_list.go @@ -20,22 +20,25 @@ import ( "container/list" ) -// removeFromFIFOFunc removes a designated element from the list. -// The complexity of the runtime cost is O(1) -// It returns the request removed from the list. +// removeFromFIFOFunc removes a designated element from the list +// if that element is in the list. +// The complexity of the runtime cost is O(1). +// The returned value is the element removed, if indeed one was removed, +// otherwise `nil`. type removeFromFIFOFunc func() *request // walkFunc is called for each request in the list in the // oldest -> newest order. // ok: if walkFunc returns false then the iteration stops immediately. +// walkFunc may remove the given request from the fifo, +// but may not mutate the fifo in any othe way. type walkFunc func(*request) (ok bool) // Internal interface to abstract out the implementation details // of the underlying list used to maintain the requests. // -// Note that the FIFO list is not safe for concurrent use by multiple -// goroutines without additional locking or coordination. 
It rests with -// the user to ensure that the FIFO list is used with proper locking. +// Note that a fifo, including the removeFromFIFOFuncs returned from Enqueue, +// is not safe for concurrent use by multiple goroutines. type fifo interface { // Enqueue enqueues the specified request into the list and // returns a removeFromFIFOFunc function that can be used to remove the @@ -45,12 +48,15 @@ type fifo interface { // Dequeue pulls out the oldest request from the list. Dequeue() (*request, bool) + // Peek returns the oldest request without removing it. + Peek() (*request, bool) + // Length returns the number of requests in the list. Length() int - // SeatsSum returns the total number of seats of all requests - // in this list. - SeatsSum() int + // QueueSum returns the sum of initial seats, final seats, and + // additional latency aggregated from all requests in this queue. + QueueSum() queueSum // Walk iterates through the list in order of oldest -> newest // and executes the specified walkFunc for each request in that order. @@ -61,11 +67,11 @@ type fifo interface { } // the FIFO list implementation is not safe for concurrent use by multiple -// goroutines without additional locking or coordination. +// goroutines. type requestFIFO struct { *list.List - seatsSum int + sum queueSum } func newRequestFIFO() fifo { @@ -78,44 +84,57 @@ func (l *requestFIFO) Length() int { return l.Len() } -func (l *requestFIFO) SeatsSum() int { - return l.seatsSum +func (l *requestFIFO) QueueSum() queueSum { + return l.sum } func (l *requestFIFO) Enqueue(req *request) removeFromFIFOFunc { e := l.PushBack(req) - l.seatsSum += req.Seats() + addToQueueSum(&l.sum, req) return func() *request { - if e.Value != nil { - l.Remove(e) - e.Value = nil - l.seatsSum -= req.Seats() + if e.Value == nil { + return nil } + l.Remove(e) + e.Value = nil + deductFromQueueSum(&l.sum, req) return req } } func (l *requestFIFO) Dequeue() (*request, bool) { + return l.getFirst(true) +} + +func (l *requestFIFO) Peek() (*request, bool) { + return l.getFirst(false) +} + +func (l *requestFIFO) getFirst(remove bool) (*request, bool) { e := l.Front() if e == nil { return nil, false } - defer func() { - l.Remove(e) - e.Value = nil - }() + if remove { + defer func() { + l.Remove(e) + e.Value = nil + }() + } request, ok := e.Value.(*request) - if ok { - l.seatsSum -= request.Seats() + if remove && ok { + deductFromQueueSum(&l.sum, request) } return request, ok } func (l *requestFIFO) Walk(f walkFunc) { - for current := l.Front(); current != nil; current = current.Next() { + var next *list.Element + for current := l.Front(); current != nil; current = next { + next = current.Next() // f is allowed to remove current if r, ok := current.Value.(*request); ok { if !f(r) { return @@ -123,3 +142,15 @@ func (l *requestFIFO) Walk(f walkFunc) { } } } + +func addToQueueSum(sum *queueSum, req *request) { + sum.InitialSeatsSum += req.InitialSeats() + sum.MaxSeatsSum += req.MaxSeats() + sum.TotalWorkSum += req.totalWork() +} + +func deductFromQueueSum(sum *queueSum, req *request) { + sum.InitialSeatsSum -= req.InitialSeats() + sum.MaxSeatsSum -= req.MaxSeats() + sum.TotalWorkSum -= req.totalWork() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index e4938e9f2..440975faf 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ 
b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -18,21 +18,25 @@ package queueset import ( "context" + "errors" "fmt" "math" "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/util/flowcontrol/counter" "k8s.io/apiserver/pkg/util/flowcontrol/debug" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" - "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock" + "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" fqrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/apiserver/pkg/util/shufflesharding" "k8s.io/klog/v2" + + // The following hack is needed to work around a tooling deficiency. + // Packages imported only for test code are not included in vendor. + // See https://kubernetes.slack.com/archives/C0EG7JC6T/p1626985671458800?thread_ts=1626983387.450800&cid=C0EG7JC6T + _ "k8s.io/utils/clock/testing" ) const nsTimeFmt = "2006-01-02 15:04:05.000000000" @@ -40,34 +44,48 @@ const nsTimeFmt = "2006-01-02 15:04:05.000000000" // queueSetFactory implements the QueueSetFactory interface // queueSetFactory makes QueueSet objects. type queueSetFactory struct { - counter counter.GoRoutineCounter - clock clock.PassiveClock + clock eventclock.Interface + promiseFactoryFactory promiseFactoryFactory } +// promiseFactory returns a WriteOnce +// - whose Set method is invoked with the queueSet locked, and +// - whose Get method is invoked with the queueSet not locked. +// The parameters are the same as for `promise.NewWriteOnce`. +type promiseFactory func(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) promise.WriteOnce + +// promiseFactoryFactory returns the promiseFactory to use for the given queueSet +type promiseFactoryFactory func(*queueSet) promiseFactory + // `*queueSetCompleter` implements QueueSetCompleter. Exactly one of // the fields `factory` and `theSet` is non-nil. type queueSetCompleter struct { - factory *queueSetFactory - obsPair metrics.TimedObserverPair - theSet *queueSet - qCfg fq.QueuingConfig - dealer *shufflesharding.Dealer + factory *queueSetFactory + reqsObsPair metrics.RatioedChangeObserverPair + execSeatsObs metrics.RatioedChangeObserver + theSet *queueSet + qCfg fq.QueuingConfig + dealer *shufflesharding.Dealer } // queueSet implements the Fair Queuing for Server Requests technique // described in this package's doc, and a pointer to one implements -// the QueueSet interface. The clock, GoRoutineCounter, and estimated -// service time should not be changed; the fields listed after the +// the QueueSet interface. The fields listed before the lock +// should not be changed; the fields listed after the // lock must be accessed only while holding the lock. The methods of // this type follow the naming convention that the suffix "Locked" // means the caller must hold the lock; for a method whose name does // not end in "Locked" either acquires the lock or does not care about // locking. 
type queueSet struct { - clock clock.PassiveClock - counter counter.GoRoutineCounter - estimatedServiceTime float64 - obsPair metrics.TimedObserverPair + clock eventclock.Interface + estimatedServiceDuration time.Duration + + reqsObsPair metrics.RatioedChangeObserverPair // .RequestsExecuting covers regular phase only + + execSeatsObs metrics.RatioedChangeObserver // for all phases of execution + + promiseFactory promiseFactory lock sync.Mutex @@ -81,16 +99,17 @@ type queueSet struct { // the current dispatching configuration. dCfg fq.DispatchingConfig - // If `config.DesiredNumQueues` is non-zero then dealer is not nil - // and is good for `config`. + // If `qCfg.DesiredNumQueues` is non-zero then dealer is not nil + // and is good for `qCfg`. dealer *shufflesharding.Dealer // queues may be longer than the desired number, while the excess // queues are still draining. queues []*queue - // virtualTime is the number of virtual seconds since process startup - virtualTime float64 + // currentR is the amount of seat-seconds allocated per queue since process startup. + // This is our generalization of the progress meter named R in the original fair queuing work. + currentR fqrequest.SeatSeconds // lastRealTime is what `clock.Now()` yielded when `virtualTime` was last updated lastRealTime time.Time @@ -111,26 +130,35 @@ type queueSet struct { // totSeatsInUse is the number of total "seats" in use by all the // request(s) that are currently executing in this queueset. totSeatsInUse int + + // enqueues is the number of requests that have ever been enqueued + enqueues int } // NewQueueSetFactory creates a new QueueSetFactory object -func NewQueueSetFactory(c clock.PassiveClock, counter counter.GoRoutineCounter) fq.QueueSetFactory { +func NewQueueSetFactory(c eventclock.Interface) fq.QueueSetFactory { + return newTestableQueueSetFactory(c, ordinaryPromiseFactoryFactory) +} + +// newTestableQueueSetFactory creates a new QueueSetFactory object with the given promiseFactoryFactory +func newTestableQueueSetFactory(c eventclock.Interface, promiseFactoryFactory promiseFactoryFactory) fq.QueueSetFactory { return &queueSetFactory{ - counter: counter, - clock: c, + clock: c, + promiseFactoryFactory: promiseFactoryFactory, } } -func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, obsPair metrics.TimedObserverPair) (fq.QueueSetCompleter, error) { +func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, reqsObsPair metrics.RatioedChangeObserverPair, execSeatsObs metrics.RatioedChangeObserver) (fq.QueueSetCompleter, error) { dealer, err := checkConfig(qCfg) if err != nil { return nil, err } return &queueSetCompleter{ - factory: qsf, - obsPair: obsPair, - qCfg: qCfg, - dealer: dealer}, nil + factory: qsf, + reqsObsPair: reqsObsPair, + execSeatsObs: execSeatsObs, + qCfg: qCfg, + dealer: dealer}, nil } // checkConfig returns a non-nil Dealer if the config is valid and @@ -151,16 +179,17 @@ func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet { qs := qsc.theSet if qs == nil { qs = &queueSet{ - clock: qsc.factory.clock, - counter: qsc.factory.counter, - estimatedServiceTime: 60, - obsPair: qsc.obsPair, - qCfg: qsc.qCfg, - virtualTime: 0, - lastRealTime: qsc.factory.clock.Now(), + clock: qsc.factory.clock, + estimatedServiceDuration: 3 * time.Millisecond, + reqsObsPair: qsc.reqsObsPair, + execSeatsObs: qsc.execSeatsObs, + qCfg: qsc.qCfg, + currentR: 0, + lastRealTime: qsc.factory.clock.Now(), } + qs.promiseFactory = qsc.factory.promiseFactoryFactory(qs) } - 
qs.setConfiguration(qsc.qCfg, qsc.dealer, dCfg) + qs.setConfiguration(context.Background(), qsc.qCfg, qsc.dealer, dCfg) return qs } @@ -184,18 +213,18 @@ func (qs *queueSet) BeginConfigChange(qCfg fq.QueuingConfig) (fq.QueueSetComplet dealer: dealer}, nil } -// SetConfiguration is used to set the configuration for a queueSet. +// setConfiguration is used to set the configuration for a queueSet. // Update handling for when fields are updated is handled here as well - -// eg: if DesiredNum is increased, SetConfiguration reconciles by +// eg: if DesiredNum is increased, setConfiguration reconciles by // adding more queues. -func (qs *queueSet) setConfiguration(qCfg fq.QueuingConfig, dealer *shufflesharding.Dealer, dCfg fq.DispatchingConfig) { - qs.lockAndSyncTime() +func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig, dealer *shufflesharding.Dealer, dCfg fq.DispatchingConfig) { + qs.lockAndSyncTime(ctx) defer qs.lock.Unlock() if qCfg.DesiredNumQueues > 0 { // Adding queues is the only thing that requires immediate action - // Removing queues is handled by omitting indexes >DesiredNum from - // chooseQueueIndexLocked + // Removing queues is handled by attrition, removing a queue when + // it goes empty and there are too many. numQueues := len(qs.queues) if qCfg.DesiredNumQueues > numQueues { qs.queues = append(qs.queues, @@ -214,8 +243,9 @@ func (qs *queueSet) setConfiguration(qCfg fq.QueuingConfig, dealer *shuffleshard if qll < 1 { qll = 1 } - qs.obsPair.RequestsWaiting.SetX1(float64(qll)) - qs.obsPair.RequestsExecuting.SetX1(float64(dCfg.ConcurrencyLimit)) + qs.reqsObsPair.RequestsWaiting.SetDenominator(float64(qll)) + qs.reqsObsPair.RequestsExecuting.SetDenominator(float64(dCfg.ConcurrencyLimit)) + qs.execSeatsObs.SetDenominator(float64(dCfg.ConcurrencyLimit)) qs.dispatchAsMuchAsPossibleLocked() } @@ -225,8 +255,13 @@ type requestDecision int // Values passed through a request's decision const ( + // Serve this one decisionExecute requestDecision = iota + + // Reject this one due to APF queuing considerations decisionReject + + // This one's context timed out / was canceled decisionCancel ) @@ -235,8 +270,10 @@ const ( // executing at each point where there is a change in that quantity, // because the metrics --- and only the metrics --- track that // quantity per FlowSchema. -func (qs *queueSet) StartRequest(ctx context.Context, width *fqrequest.Width, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) (fq.Request, bool) { - qs.lockAndSyncTime() +// The queueSet's promiseFactory is invoked once if the returned Request is non-nil, +// not invoked if the Request is nil. 
+func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) (fq.Request, bool) { + qs.lockAndSyncTime(ctx) defer qs.lock.Unlock() var req *request @@ -244,13 +281,13 @@ func (qs *queueSet) StartRequest(ctx context.Context, width *fqrequest.Width, ha // Step 0: // Apply only concurrency limit, if zero queues desired if qs.qCfg.DesiredNumQueues < 1 { - if !qs.canAccommodateSeatsLocked(int(width.Seats)) { + if !qs.canAccommodateSeatsLocked(workEstimate.MaxSeats()) { klog.V(5).Infof("QS(%s): rejecting request %q %#+v %#+v because %d seats are asked for, %d seats are in use (%d are executing) and the limit is %d", - qs.qCfg.Name, fsName, descr1, descr2, width, qs.totSeatsInUse, qs.totRequestsExecuting, qs.dCfg.ConcurrencyLimit) + qs.qCfg.Name, fsName, descr1, descr2, workEstimate, qs.totSeatsInUse, qs.totRequestsExecuting, qs.dCfg.ConcurrencyLimit) metrics.AddReject(ctx, qs.qCfg.Name, fsName, "concurrency-limit") return nil, qs.isIdleLocked() } - req = qs.dispatchSansQueueLocked(ctx, width, flowDistinguisher, fsName, descr1, descr2) + req = qs.dispatchSansQueueLocked(ctx, workEstimate, flowDistinguisher, fsName, descr1, descr2) return req, false } @@ -261,7 +298,7 @@ func (qs *queueSet) StartRequest(ctx context.Context, width *fqrequest.Width, ha // 3) Reject current request if there is not enough concurrency shares and // we are at max queue length // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, width, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -281,42 +318,24 @@ func (qs *queueSet) StartRequest(ctx context.Context, width *fqrequest.Width, ha // request from that queue. qs.dispatchAsMuchAsPossibleLocked() - // ======================================================================== - // Step 3: - - // Set up a relay from the context's Done channel to the world - // of well-counted goroutines. We Are Told that every - // request's context's Done channel gets closed by the time - // the request is done being processed. - doneCh := ctx.Done() - - // Retrieve the queueset configuration name while we have the lock - // and use it in the goroutine below. - configName := qs.qCfg.Name - - if doneCh != nil { - qs.preCreateOrUnblockGoroutine() - go func() { - defer runtime.HandleCrash() - qs.goroutineDoneOrBlocked() - _ = <-doneCh - // Whatever goroutine unblocked the preceding receive MUST - // have already either (a) incremented qs.counter or (b) - // known that said counter is not actually counting or (c) - // known that the count does not need to be accurate. - // BTW, the count only needs to be accurate in a test that - // uses FakeEventClock::Run(). - klog.V(6).Infof("QS(%s): Context of request %q %#+v %#+v is Done", configName, fsName, descr1, descr2) - qs.cancelWait(req) - qs.goroutineDoneOrBlocked() - }() - } return req, false } -// Seats returns the number of seats this request requires. -func (req *request) Seats() int { - return int(req.width.Seats) +// ordinaryPromiseFactoryFactory is the promiseFactoryFactory that +// a queueSetFactory would ordinarily use. 
+// Test code might use something different. +func ordinaryPromiseFactoryFactory(qs *queueSet) promiseFactory { + return promise.NewWriteOnce +} + +// MaxSeats returns the maximum number of seats this request requires, it is +// the maxumum of the two - WorkEstimate.InitialSeats, WorkEstimate.FinalSeats. +func (req *request) MaxSeats() int { + return req.workEstimate.MaxSeats() +} + +func (req *request) InitialSeats() int { + return int(req.workEstimate.InitialSeats) } func (req *request) NoteQueued(inQueue bool) { @@ -343,41 +362,45 @@ func (req *request) Finish(execFn func()) bool { func (req *request) wait() (bool, bool) { qs := req.qs - qs.lock.Lock() + + // ======================================================================== + // Step 3: + // The final step is to wait on a decision from + // somewhere and then act on it. + decisionAny := req.decision.Get() + qs.lockAndSyncTime(req.ctx) defer qs.lock.Unlock() if req.waitStarted { // This can not happen, because the client is forbidden to // call Wait twice on the same request - panic(fmt.Sprintf("Multiple calls to the Wait method, QueueSet=%s, startTime=%s, descr1=%#+v, descr2=%#+v", req.qs.qCfg.Name, req.startTime, req.descr1, req.descr2)) + klog.Errorf("Duplicate call to the Wait method! Immediately returning execute=false. QueueSet=%s, startTime=%s, descr1=%#+v, descr2=%#+v", req.qs.qCfg.Name, req.startTime, req.descr1, req.descr2) + return false, qs.isIdleLocked() } req.waitStarted = true - - // ======================================================================== - // Step 4: - // The final step is to wait on a decision from - // somewhere and then act on it. - decisionAny := req.decision.GetLocked() - qs.syncTimeLocked() - decision, isDecision := decisionAny.(requestDecision) - if !isDecision { - panic(fmt.Sprintf("QS(%s): Impossible decision %#+v (of type %T) for request %#+v %#+v", qs.qCfg.Name, decisionAny, decisionAny, req.descr1, req.descr2)) - } - switch decision { + switch decisionAny { case decisionReject: klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") return false, qs.isIdleLocked() case decisionCancel: - // TODO(aaron-prindle) add metrics for this case - klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) - return false, qs.isIdleLocked() case decisionExecute: klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) return true, false default: // This can not happen, all possible values are handled above - panic(decision) + klog.Errorf("QS(%s): Impossible decision (type %T, value %#+v) for request %#+v %#+v! 
Treating as cancel", qs.qCfg.Name, decisionAny, decisionAny, req.descr1, req.descr2) } + // TODO(aaron-prindle) add metrics for this case + klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) + // remove the request from the queue as it has timed out + if req.removeFromQueueLocked() != nil { + qs.totRequestsWaiting-- + metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled") + metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) + req.NoteQueued(false) + qs.reqsObsPair.RequestsWaiting.Add(-1) + } + return false, qs.isIdleLocked() } func (qs *queueSet) IsIdle() bool { @@ -391,23 +414,74 @@ func (qs *queueSet) isIdleLocked() bool { } // lockAndSyncTime acquires the lock and updates the virtual time. -// Doing them together avoids the mistake of modify some queue state +// Doing them together avoids the mistake of modifying some queue state // before calling syncTimeLocked. -func (qs *queueSet) lockAndSyncTime() { +func (qs *queueSet) lockAndSyncTime(ctx context.Context) { qs.lock.Lock() - qs.syncTimeLocked() + qs.syncTimeLocked(ctx) } // syncTimeLocked updates the virtual time based on the assumption // that the current state of the queues has been in effect since // `qs.lastRealTime`. Thus, it should be invoked after acquiring the // lock and before modifying the state of any queue. -func (qs *queueSet) syncTimeLocked() { +func (qs *queueSet) syncTimeLocked(ctx context.Context) { realNow := qs.clock.Now() - timeSinceLast := realNow.Sub(qs.lastRealTime).Seconds() + timeSinceLast := realNow.Sub(qs.lastRealTime) qs.lastRealTime = realNow - qs.virtualTime += timeSinceLast * qs.getVirtualTimeRatioLocked() - metrics.SetCurrentR(qs.qCfg.Name, qs.virtualTime) + prevR := qs.currentR + incrR := fqrequest.SeatsTimesDuration(qs.getVirtualTimeRatioLocked(), timeSinceLast) + qs.currentR = prevR + incrR + switch { + case prevR > qs.currentR: + klog.ErrorS(errors.New("queueset::currentR overflow"), "Overflow", "QS", qs.qCfg.Name, "when", realNow.Format(nsTimeFmt), "prevR", prevR, "incrR", incrR, "currentR", qs.currentR) + case qs.currentR >= highR: + qs.advanceEpoch(ctx, realNow, incrR) + } + metrics.SetCurrentR(qs.qCfg.Name, qs.currentR.ToFloat()) +} + +// rDecrement is the amount by which the progress meter R is wound backwards +// when needed to avoid overflow. +const rDecrement = fqrequest.MaxSeatSeconds / 2 + +// highR is the threshold that triggers advance of the epoch. +// That is, decrementing the global progress meter R by rDecrement. +const highR = rDecrement + rDecrement/2 + +// advanceEpoch subtracts rDecrement from the global progress meter R +// and all the readings that have been taked from that meter. +// The now and incrR parameters are only used to add info to the log messages. +func (qs *queueSet) advanceEpoch(ctx context.Context, now time.Time, incrR fqrequest.SeatSeconds) { + oldR := qs.currentR + qs.currentR -= rDecrement + klog.InfoS("Advancing epoch", "QS", qs.qCfg.Name, "when", now.Format(nsTimeFmt), "oldR", oldR, "newR", qs.currentR, "incrR", incrR) + success := true + for qIdx, queue := range qs.queues { + if queue.requests.Length() == 0 && queue.requestsExecuting == 0 { + // Do not just decrement, the value could be quite outdated. + // It is safe to reset to zero in this case, because the next request + // will overwrite the zero with `qs.currentR`. 
+ queue.nextDispatchR = 0 + continue + } + oldNextDispatchR := queue.nextDispatchR + queue.nextDispatchR -= rDecrement + if queue.nextDispatchR > oldNextDispatchR { + klog.ErrorS(errors.New("queue::nextDispatchR underflow"), "Underflow", "QS", qs.qCfg.Name, "queue", qIdx, "oldNextDispatchR", oldNextDispatchR, "newNextDispatchR", queue.nextDispatchR, "incrR", incrR) + success = false + } + queue.requests.Walk(func(req *request) bool { + oldArrivalR := req.arrivalR + req.arrivalR -= rDecrement + if req.arrivalR > oldArrivalR { + klog.ErrorS(errors.New("request::arrivalR underflow"), "Underflow", "QS", qs.qCfg.Name, "queue", qIdx, "request", *req, "oldArrivalR", oldArrivalR, "incrR", incrR) + success = false + } + return true + }) + } + metrics.AddEpochAdvance(ctx, qs.qCfg.Name, success) } // getVirtualTimeRatio calculates the rate at which virtual time has @@ -416,7 +490,9 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { activeQueues := 0 seatsRequested := 0 for _, queue := range qs.queues { - seatsRequested += (queue.seatsInUse + queue.requests.SeatsSum()) + // here we want the sum of the maximum width of the requests in this queue since our + // goal is to find the maximum rate at which the queue could work. + seatsRequested += (queue.seatsInUse + queue.requests.QueueSum().MaxSeatsSum) if queue.requests.Length() > 0 || queue.requestsExecuting > 0 { activeQueues++ } @@ -437,9 +513,9 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, width *fqrequest.Width, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { +func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { // Start with the shuffle sharding, to pick a queue. - queueIdx := qs.chooseQueueIndexLocked(hashValue, descr1, descr2) + queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] // The next step is the logic to reject requests that have been waiting too long qs.removeTimedOutRequestsFromQueueLocked(queue, fsName) @@ -447,19 +523,22 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte // requests that are in the queue longer than the timeout if there are no new requests // We prefer the simplicity over the promptness, at least for now. 
+ defer qs.boundNextDispatchLocked(queue) + // Create a request and enqueue req := &request{ qs: qs, fsName: fsName, flowDistinguisher: flowDistinguisher, ctx: ctx, - decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + decision: qs.promiseFactory(nil, ctx.Done(), decisionCancel), arrivalTime: qs.clock.Now(), + arrivalR: qs.currentR, queue: queue, descr1: descr1, descr2: descr2, queueNoteFn: queueNoteFn, - width: *width, + workEstimate: qs.completeWorkEstimate(workEstimate), } if ok := qs.rejectOrEnqueueLocked(req); !ok { return nil @@ -468,24 +547,36 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte return req } -// chooseQueueIndexLocked uses shuffle sharding to select a queue index +// shuffleShardLocked uses shuffle sharding to select a queue index // using the given hashValue and the shuffle sharding parameters of the queueSet. -func (qs *queueSet) chooseQueueIndexLocked(hashValue uint64, descr1, descr2 interface{}) int { +func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interface{}) int { + var backHand [8]int + // Deal into a data structure, so that the order of visit below is not necessarily the order of the deal. + // This removes bias in the case of flows with overlapping hands. + hand := qs.dealer.DealIntoHand(hashValue, backHand[:]) + handSize := len(hand) + offset := qs.enqueues % handSize + qs.enqueues++ bestQueueIdx := -1 - bestQueueSeatsSum := int(math.MaxInt32) - // the dealer uses the current desired number of queues, which is no larger than the number in `qs.queues`. - qs.dealer.Deal(hashValue, func(queueIdx int) { - // TODO: Consider taking into account `additional latency` of requests - // in addition to their widths. - // Ideally, this should be based on projected completion time in the - // virtual world of the youngest request in the queue. - thisSeatsSum := qs.queues[queueIdx].requests.SeatsSum() - klog.V(7).Infof("QS(%s): For request %#+v %#+v considering queue %d of seatsSum %d", qs.qCfg.Name, descr1, descr2, queueIdx, thisSeatsSum) - if thisSeatsSum < bestQueueSeatsSum { - bestQueueIdx, bestQueueSeatsSum = queueIdx, thisSeatsSum + minQueueSeatSeconds := fqrequest.MaxSeatSeconds + for i := 0; i < handSize; i++ { + queueIdx := hand[(offset+i)%handSize] + queue := qs.queues[queueIdx] + queueSum := queue.requests.QueueSum() + + // this is the total amount of work in seat-seconds for requests + // waiting in this queue, we will select the queue with the minimum. 
+ thisQueueSeatSeconds := queueSum.TotalWorkSum + klog.V(7).Infof("QS(%s): For request %#+v %#+v considering queue %d with sum: %#v and %d seats in use, nextDispatchR=%v", qs.qCfg.Name, descr1, descr2, queueIdx, queueSum, queue.seatsInUse, queue.nextDispatchR) + if thisQueueSeatSeconds < minQueueSeatSeconds { + minQueueSeatSeconds = thisQueueSeatSeconds + bestQueueIdx = queueIdx } - }) - klog.V(6).Infof("QS(%s) at r=%s v=%.9fs: For request %#+v %#+v chose queue %d, had %d waiting & %d executing", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.virtualTime, descr1, descr2, bestQueueIdx, bestQueueSeatsSum, qs.queues[bestQueueIdx].requestsExecuting) + } + if klog.V(6).Enabled() { + chosenQueue := qs.queues[bestQueueIdx] + klog.V(6).Infof("QS(%s) at t=%s R=%v: For request %#+v %#+v chose queue %d, with sum: %#v & %d seats in use & nextDispatchR=%v", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.currentR, descr1, descr2, bestQueueIdx, chosenQueue.requests.QueueSum(), chosenQueue.seatsInUse, chosenQueue.nextDispatchR) + } return bestQueueIdx } @@ -499,33 +590,26 @@ func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName s // can short circuit loop (break) if oldest requests are not timing out // as newer requests also will not have timed out - // now - requestWaitLimit = waitLimit - waitLimit := now.Add(-qs.qCfg.RequestWaitLimit) + // now - requestWaitLimit = arrivalLimit + arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit) reqs.Walk(func(req *request) bool { - if waitLimit.After(req.arrivalTime) { - req.decision.SetLocked(decisionReject) - timeoutCount++ - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - req.NoteQueued(false) - + if arrivalLimit.After(req.arrivalTime) { + if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil { + timeoutCount++ + req.NoteQueued(false) + metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) + } // we need to check if the next request has timed out. return true } - // since reqs are sorted oldest -> newest, we are done here. return false }) // remove timed out requests from queue if timeoutCount > 0 { - // The number of requests we have timed out is timeoutCount, - // so, let's dequeue the exact number of requests for this queue. - for i := 0; i < timeoutCount; i++ { - queue.requests.Dequeue() - } - // decrement the # of requestsEnqueued qs.totRequestsWaiting -= timeoutCount - qs.obsPair.RequestsWaiting.Add(float64(-timeoutCount)) + qs.reqsObsPair.RequestsWaiting.Add(float64(-timeoutCount)) } } @@ -551,35 +635,26 @@ func (qs *queueSet) enqueueLocked(request *request) { queue := request.queue now := qs.clock.Now() if queue.requests.Length() == 0 && queue.requestsExecuting == 0 { - // the queue’s virtual start time is set to the virtual time. - queue.virtualStart = qs.virtualTime + // the queue’s start R is set to the virtual time. 
+ queue.nextDispatchR = qs.currentR if klog.V(6).Enabled() { - klog.Infof("QS(%s) at r=%s v=%.9fs: initialized queue %d virtual start time due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.virtualStart, queue.index, request.descr1, request.descr2) + klog.Infof("QS(%s) at t=%s R=%v: initialized queue %d start R due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.nextDispatchR, queue.index, request.descr1, request.descr2) } } - queue.Enqueue(request) + request.removeFromQueueLocked = queue.requests.Enqueue(request) qs.totRequestsWaiting++ metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1) request.NoteQueued(true) - qs.obsPair.RequestsWaiting.Add(1) + qs.reqsObsPair.RequestsWaiting.Add(1) } -// dispatchAsMuchAsPossibleLocked runs a loop, as long as there -// are non-empty queues and the number currently executing is less than the -// assured concurrency value. The body of the loop uses the fair queuing -// technique to pick a queue, dequeue the request at the head of that -// queue, increment the count of the number executing, and send true -// to the request's channel. +// dispatchAsMuchAsPossibleLocked does as many dispatches as possible now. func (qs *queueSet) dispatchAsMuchAsPossibleLocked() { - for qs.totRequestsWaiting != 0 && qs.totSeatsInUse < qs.dCfg.ConcurrencyLimit { - ok := qs.dispatchLocked() - if !ok { - break - } + for qs.totRequestsWaiting != 0 && qs.totSeatsInUse < qs.dCfg.ConcurrencyLimit && qs.dispatchLocked() { } } -func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, width *fqrequest.Width, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { +func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { // does not call metrics.SetDispatchMetrics because there is no queuing and thus no interesting virtual world now := qs.clock.Now() req := &request{ @@ -588,83 +663,68 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, width *fqreques flowDistinguisher: flowDistinguisher, ctx: ctx, startTime: now, - decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + decision: qs.promiseFactory(decisionExecute, ctx.Done(), decisionCancel), arrivalTime: now, + arrivalR: qs.currentR, descr1: descr1, descr2: descr2, - width: *width, + workEstimate: qs.completeWorkEstimate(workEstimate), } - req.decision.SetLocked(decisionExecute) qs.totRequestsExecuting++ - qs.totSeatsInUse += req.Seats() + qs.totSeatsInUse += req.MaxSeats() metrics.AddRequestsExecuting(ctx, qs.qCfg.Name, fsName, 1) - metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, fsName, req.Seats()) - qs.obsPair.RequestsExecuting.Add(1) + metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, fsName, req.MaxSeats()) + qs.reqsObsPair.RequestsExecuting.Add(1) + qs.execSeatsObs.Add(float64(req.MaxSeats())) if klog.V(5).Enabled() { - klog.Infof("QS(%s) at r=%s v=%.9fs: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, fsName, descr1, descr2, qs.totRequestsExecuting) + klog.Infof("QS(%s) at t=%s R=%v: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, fsName, descr1, descr2, qs.totRequestsExecuting) } return req } // dispatchLocked uses the Fair Queuing for Server Requests method to // select a queue and dispatch the oldest request in that queue. 
The -// return value indicates whether a request was dispatched; this will -// be false when there are no requests waiting in any queue. +// return value indicates whether a request was dequeued; this will +// be false when either all queues are empty or the request at the head +// of the next queue cannot be dispatched. func (qs *queueSet) dispatchLocked() bool { - queue := qs.selectQueueLocked() + queue, request := qs.findDispatchQueueLocked() if queue == nil { return false } - request, ok := queue.Dequeue() - if !ok { // This should never happen. But if it does... + if request == nil { // This should never happen. But if it does... return false } + qs.totRequestsWaiting-- + metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1) + request.NoteQueued(false) + qs.reqsObsPair.RequestsWaiting.Add(-1) + defer qs.boundNextDispatchLocked(queue) + if !request.decision.Set(decisionExecute) { + return true + } request.startTime = qs.clock.Now() // At this moment the request leaves its queue and starts // executing. We do not recognize any interim state between // "queued" and "executing". While that means "executing" // includes a little overhead from this package, this is not a // problem because other overhead is also included. - qs.totRequestsWaiting-- qs.totRequestsExecuting++ - qs.totSeatsInUse += request.Seats() + qs.totSeatsInUse += request.MaxSeats() queue.requestsExecuting++ - queue.seatsInUse += request.Seats() - metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1) - request.NoteQueued(false) + queue.seatsInUse += request.MaxSeats() metrics.AddRequestsExecuting(request.ctx, qs.qCfg.Name, request.fsName, 1) - metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, request.fsName, request.Seats()) - qs.obsPair.RequestsWaiting.Add(-1) - qs.obsPair.RequestsExecuting.Add(1) + metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, request.fsName, request.MaxSeats()) + qs.reqsObsPair.RequestsExecuting.Add(1) + qs.execSeatsObs.Add(float64(request.MaxSeats())) if klog.V(6).Enabled() { - klog.Infof("QS(%s) at r=%s v=%.9fs: dispatching request %#+v %#+v from queue %d with virtual start time %.9fs, queue will have %d waiting & %d executing", - qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.virtualTime, request.descr1, request.descr2, - queue.index, queue.virtualStart, queue.requests.Length(), queue.requestsExecuting) + klog.Infof("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v work %v from queue %d with start R %v, queue will have %d waiting & %d requests occupying %d seats, set will have %d seats occupied", + qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.currentR, request.descr1, request.descr2, + request.workEstimate, queue.index, queue.nextDispatchR, queue.requests.Length(), queue.requestsExecuting, queue.seatsInUse, qs.totSeatsInUse) } - // When a request is dequeued for service -> qs.virtualStart += G - queue.virtualStart += qs.estimatedServiceTime * float64(request.Seats()) - request.decision.SetLocked(decisionExecute) - return ok -} - -// cancelWait ensures the request is not waiting. This is only -// applicable to a request that has been assigned to a queue. -func (qs *queueSet) cancelWait(req *request) { - qs.lock.Lock() - defer qs.lock.Unlock() - if req.decision.IsSetLocked() { - // The request has already been removed from the queue - // and so we consider its wait to be over. 
- return - } - req.decision.SetLocked(decisionCancel) - - // remove the request from the queue as it has timed out - req.removeFromQueueFn() - qs.totRequestsWaiting-- - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - req.NoteQueued(false) - qs.obsPair.RequestsWaiting.Add(-1) + // When a request is dequeued for service -> qs.virtualStart += G * width + queue.nextDispatchR += request.totalWork() + return true } // canAccommodateSeatsLocked returns true if this queueSet has enough @@ -682,10 +742,6 @@ func (qs *queueSet) canAccommodateSeatsLocked(seats int) bool { } // wait for all "currently" executing requests in this queueSet // to finish before we can execute this request. - if klog.V(4).Enabled() { - klog.Infof("QS(%s): seats (%d) asked for exceeds concurrency limit, waiting for currently executing requests to complete, %d seats are in use (%d are executing) and the limit is %d", - qs.qCfg.Name, seats, qs.totSeatsInUse, qs.totRequestsExecuting, qs.dCfg.ConcurrencyLimit) - } return false case qs.totSeatsInUse+seats > qs.dCfg.ConcurrencyLimit: return false @@ -694,45 +750,31 @@ func (qs *queueSet) canAccommodateSeatsLocked(seats int) bool { return true } -// selectQueueLocked examines the queues in round robin order and +// findDispatchQueueLocked examines the queues in round robin order and // returns the first one of those for which the virtual finish time of -// the oldest waiting request is minimal. -func (qs *queueSet) selectQueueLocked() *queue { - minVirtualFinish := math.Inf(1) - sMin := math.Inf(1) - dsMin := math.Inf(1) - sMax := math.Inf(-1) - dsMax := math.Inf(-1) +// the oldest waiting request is minimal, and also returns that request. +// Returns nils if the head of the selected queue can not be dispatched now. +func (qs *queueSet) findDispatchQueueLocked() (*queue, *request) { + minVirtualFinish := fqrequest.MaxSeatSeconds + sMin := fqrequest.MaxSeatSeconds + dsMin := fqrequest.MaxSeatSeconds + sMax := fqrequest.MinSeatSeconds + dsMax := fqrequest.MinSeatSeconds var minQueue *queue var minIndex int nq := len(qs.queues) for range qs.queues { qs.robinIndex = (qs.robinIndex + 1) % nq queue := qs.queues[qs.robinIndex] - if queue.requests.Length() != 0 { - sMin = math.Min(sMin, queue.virtualStart) - sMax = math.Max(sMax, queue.virtualStart) - estimatedWorkInProgress := qs.estimatedServiceTime * float64(queue.seatsInUse) - dsMin = math.Min(dsMin, queue.virtualStart-estimatedWorkInProgress) - dsMax = math.Max(dsMax, queue.virtualStart-estimatedWorkInProgress) - // the virtual finish time of the oldest request is: - // virtual start time + G - // we are not taking the width of the request into account when - // we calculate the virtual finish time of the request because - // it can starve requests with smaller wdith in other queues. - // - // so let's draw an example of the starving scenario: - // - G=60 (estimated service time in seconds) - // - concurrency limit=2 - // - we have two queues, q1 and q2 - // - q1 has an infinite supply of requests with width W=1 - // - q2 has one request waiting in the queue with width W=2 - // - virtual start time for both q1 and q2 are at t0 - // - requests complete really fast, S=1ms on q1 - // in this scenario we will execute roughly 60,000 requests - // from q1 before we pick the request from q2. 
- currentVirtualFinish := queue.virtualStart + qs.estimatedServiceTime - + oldestWaiting, _ := queue.requests.Peek() + if oldestWaiting != nil { + sMin = ssMin(sMin, queue.nextDispatchR) + sMax = ssMax(sMax, queue.nextDispatchR) + estimatedWorkInProgress := fqrequest.SeatsTimesDuration(float64(queue.seatsInUse), qs.estimatedServiceDuration) + dsMin = ssMin(dsMin, queue.nextDispatchR-estimatedWorkInProgress) + dsMax = ssMax(dsMax, queue.nextDispatchR-estimatedWorkInProgress) + currentVirtualFinish := queue.nextDispatchR + oldestWaiting.totalWork() + klog.V(11).InfoS("Considering queue to dispatch", "queueSet", qs.qCfg.Name, "queue", qs.robinIndex, "finishR", currentVirtualFinish) if currentVirtualFinish < minVirtualFinish { minVirtualFinish = currentVirtualFinish minQueue = queue @@ -741,41 +783,57 @@ func (qs *queueSet) selectQueueLocked() *queue { } } - // TODO: add a method to fifo that lets us peek at the oldest request - var oldestReqFromMinQueue *request - minQueue.requests.Walk(func(r *request) bool { - oldestReqFromMinQueue = r - return false - }) - if oldestReqFromMinQueue == nil || !qs.canAccommodateSeatsLocked(oldestReqFromMinQueue.Seats()) { + oldestReqFromMinQueue, _ := minQueue.requests.Peek() + if oldestReqFromMinQueue == nil { + // This cannot happen + klog.ErrorS(errors.New("selected queue is empty"), "Impossible", "queueSet", qs.qCfg.Name) + return nil, nil + } + if !qs.canAccommodateSeatsLocked(oldestReqFromMinQueue.MaxSeats()) { // since we have not picked the queue with the minimum virtual finish // time, we are not going to advance the round robin index here. - return nil + if klog.V(4).Enabled() { + klog.Infof("QS(%s): request %v %v seats %d cannot be dispatched from queue %d, waiting for currently executing requests to complete, %d requests are occupying %d seats and the limit is %d", + qs.qCfg.Name, oldestReqFromMinQueue.descr1, oldestReqFromMinQueue.descr2, oldestReqFromMinQueue.MaxSeats(), minQueue.index, qs.totRequestsExecuting, qs.totSeatsInUse, qs.dCfg.ConcurrencyLimit) + } + return nil, nil + } + oldestReqFromMinQueue.removeFromQueueLocked() + + // If the requested final seats exceed capacity of that queue, + // we reduce them to current capacity and adjust additional latency + // to preserve the total amount of work. + if oldestReqFromMinQueue.workEstimate.FinalSeats > uint(qs.dCfg.ConcurrencyLimit) { + finalSeats := uint(qs.dCfg.ConcurrencyLimit) + additionalLatency := oldestReqFromMinQueue.workEstimate.finalWork.DurationPerSeat(float64(finalSeats)) + oldestReqFromMinQueue.workEstimate.FinalSeats = finalSeats + oldestReqFromMinQueue.workEstimate.AdditionalLatency = additionalLatency } // we set the round robin indexing to start at the chose queue // for the next round. This way the non-selected queues // win in the case that the virtual finish times are the same qs.robinIndex = minIndex - // according to the original FQ formula: - // - // Si = MAX(R(t), Fi-1) - // - // the virtual start (excluding the estimated cost) of the chose - // queue should always be greater or equal to the global virtual - // time. - // - // hence we're refreshing the per-queue virtual time for the chosen - // queue here. 
if the last virtual start time (excluded estimated cost) - // falls behind the global virtual time, we update the latest virtual - // start by: + - previouslyEstimatedServiceTime := float64(minQueue.seatsInUse) * qs.estimatedServiceTime - if qs.virtualTime > minQueue.virtualStart-previouslyEstimatedServiceTime { - // per-queue virtual time should not fall behind the global - minQueue.virtualStart = qs.virtualTime + previouslyEstimatedServiceTime + + if minQueue.nextDispatchR < oldestReqFromMinQueue.arrivalR { + klog.ErrorS(errors.New("dispatch before arrival"), "Inconceivable!", "QS", qs.qCfg.Name, "queue", minQueue.index, "dispatchR", minQueue.nextDispatchR, "request", oldestReqFromMinQueue) } - metrics.SetDispatchMetrics(qs.qCfg.Name, qs.virtualTime, minQueue.virtualStart, sMin, sMax, dsMin, dsMax) - return minQueue + metrics.SetDispatchMetrics(qs.qCfg.Name, qs.currentR.ToFloat(), minQueue.nextDispatchR.ToFloat(), sMin.ToFloat(), sMax.ToFloat(), dsMin.ToFloat(), dsMax.ToFloat()) + return minQueue, oldestReqFromMinQueue +} + +func ssMin(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds { + if a > b { + return b + } + return a +} + +func ssMax(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds { + if a < b { + return b + } + return a } // finishRequestAndDispatchAsMuchAsPossible is a convenience method @@ -784,7 +842,7 @@ func (qs *queueSet) selectQueueLocked() *queue { // once a request finishes execution or is canceled. This returns a bool // indicating whether the QueueSet is now idle. func (qs *queueSet) finishRequestAndDispatchAsMuchAsPossible(req *request) bool { - qs.lockAndSyncTime() + qs.lockAndSyncTime(req.ctx) defer qs.lock.Unlock() qs.finishRequestLocked(req) @@ -798,32 +856,105 @@ func (qs *queueSet) finishRequestAndDispatchAsMuchAsPossible(req *request) bool func (qs *queueSet) finishRequestLocked(r *request) { now := qs.clock.Now() qs.totRequestsExecuting-- - qs.totSeatsInUse -= r.Seats() metrics.AddRequestsExecuting(r.ctx, qs.qCfg.Name, r.fsName, -1) - metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, r.fsName, -r.Seats()) - qs.obsPair.RequestsExecuting.Add(-1) + qs.reqsObsPair.RequestsExecuting.Add(-1) - if r.queue == nil { - if klog.V(6).Enabled() { - klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, qs.totRequestsExecuting) + actualServiceDuration := now.Sub(r.startTime) + + // TODO: for now we keep the logic localized so it is easier to see + // how the counters are tracked for queueset and queue, in future we + // can refactor to move this function. 
+ releaseSeatsLocked := func() { + defer qs.removeQueueIfEmptyLocked(r) + + qs.totSeatsInUse -= r.MaxSeats() + metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, r.fsName, -r.MaxSeats()) + qs.execSeatsObs.Add(-float64(r.MaxSeats())) + if r.queue != nil { + r.queue.seatsInUse -= r.MaxSeats() } - return } - S := now.Sub(r.startTime).Seconds() + defer func() { + if r.workEstimate.AdditionalLatency <= 0 { + // release the seats allocated to this request immediately + releaseSeatsLocked() + if !klog.V(6).Enabled() { + } else if r.queue != nil { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats", + qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.MaxSeats(), r.queue.index, + r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse) + } else { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, qs.totRequestsExecuting, qs.totSeatsInUse) + } + return + } - // When a request finishes being served, and the actual service time was S, - // the queue’s virtual start time is decremented by (G - S)*width. - r.queue.virtualStart -= (qs.estimatedServiceTime - S) * float64(r.Seats()) + additionalLatency := r.workEstimate.AdditionalLatency + if !klog.V(6).Enabled() { + } else if r.queue != nil { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats", + qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), r.queue.index, + r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse) + } else { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), qs.totRequestsExecuting, qs.totSeatsInUse) + } + // EventAfterDuration will execute the event func in a new goroutine, + // so the seats allocated to this request will be released after + // AdditionalLatency elapses, this ensures that the additional + // latency has no impact on the user experience. 
+ qs.clock.EventAfterDuration(func(_ time.Time) { + qs.lockAndSyncTime(r.ctx) + defer qs.lock.Unlock() + now := qs.clock.Now() + releaseSeatsLocked() + if !klog.V(6).Enabled() { + } else if r.queue != nil { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, queue %d will have %d requests with %#v waiting & %d requests occupying %d seats", + qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, r.queue.index, + r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse) + } else { + klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, qs.totRequestsExecuting, qs.totSeatsInUse) + } + qs.dispatchAsMuchAsPossibleLocked() + }, additionalLatency) + }() - // request has finished, remove from requests executing - r.queue.requestsExecuting-- - r.queue.seatsInUse -= r.Seats() + if r.queue != nil { + // request has finished, remove from requests executing + r.queue.requestsExecuting-- - if klog.V(6).Enabled() { - klog.Infof("QS(%s) at r=%s v=%.9fs: request %#+v %#+v finished, adjusted queue %d virtual start time to %.9fs due to service time %.9fs, queue will have %d waiting & %d executing", - qs.qCfg.Name, now.Format(nsTimeFmt), qs.virtualTime, r.descr1, r.descr2, r.queue.index, - r.queue.virtualStart, S, r.queue.requests.Length(), r.queue.requestsExecuting) + // When a request finishes being served, and the actual service time was S, + // the queue’s start R is decremented by (G - S)*width. + r.queue.nextDispatchR -= fqrequest.SeatsTimesDuration(float64(r.InitialSeats()), qs.estimatedServiceDuration-actualServiceDuration) + qs.boundNextDispatchLocked(r.queue) + } +} + +// boundNextDispatchLocked applies the anti-windup hack. +// We need a hack because all non-empty queues are allocated the same +// number of seats. A queue that can not use all those seats and does +// not go empty accumulates a progresively earlier `virtualStart` compared +// to queues that are using more than they are allocated. +// The following hack addresses the first side of that inequity, +// by insisting that dispatch in the virtual world not precede arrival. +func (qs *queueSet) boundNextDispatchLocked(queue *queue) { + oldestReqFromMinQueue, _ := queue.requests.Peek() + if oldestReqFromMinQueue == nil { + return + } + var virtualStartBound = oldestReqFromMinQueue.arrivalR + if queue.nextDispatchR < virtualStartBound { + if klog.V(4).Enabled() { + klog.InfoS("AntiWindup tweaked queue", "QS", qs.qCfg.Name, "queue", queue.index, "time", qs.clock.Now().Format(nsTimeFmt), "requestDescr1", oldestReqFromMinQueue.descr1, "requestDescr2", oldestReqFromMinQueue.descr2, "newVirtualStart", virtualStartBound, "deltaVirtualStart", (virtualStartBound - queue.nextDispatchR)) + } + queue.nextDispatchR = virtualStartBound + } +} + +func (qs *queueSet) removeQueueIfEmptyLocked(r *request) { + if r.queue == nil { + return } // If there are more queues than desired and this one has no @@ -851,24 +982,10 @@ func removeQueueAndUpdateIndexes(queues []*queue, index int) []*queue { return keptQueues } -// preCreateOrUnblockGoroutine needs to be called before creating a -// goroutine associated with this queueSet or unblocking a blocked -// one, to properly update the accounting used in testing. 
-func (qs *queueSet) preCreateOrUnblockGoroutine() { - qs.counter.Add(1) -} - -// goroutineDoneOrBlocked needs to be called at the end of every -// goroutine associated with this queueSet or when such a goroutine is -// about to wait on some other goroutine to do something; this is to -// properly update the accounting used in testing. -func (qs *queueSet) goroutineDoneOrBlocked() { - qs.counter.Add(-1) -} - func (qs *queueSet) UpdateObservations() { - qs.obsPair.RequestsWaiting.Add(0) - qs.obsPair.RequestsExecuting.Add(0) + qs.reqsObsPair.RequestsWaiting.Add(0) + qs.reqsObsPair.RequestsExecuting.Add(0) + qs.execSeatsObs.Add(0) } func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump { @@ -881,7 +998,7 @@ func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump { SeatsInUse: qs.totSeatsInUse, } for i, q := range qs.queues { - d.Queues[i] = q.dump(includeRequestDetails) + d.Queues[i] = q.dumpLocked(includeRequestDetails) } return d } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go index 7f121d707..f1073b96b 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go @@ -28,7 +28,7 @@ import ( ) // request is a temporary container for "requests" with additional -// tracking fields required for the functionality FQScheduler +// tracking fields required for QueueSet functionality. type request struct { ctx context.Context @@ -41,18 +41,17 @@ type request struct { // a queue. queue *queue - // startTime is the real time when the request began executing - startTime time.Time - - // width of the request - width fcrequest.Width + // estimated amount of work of the request + workEstimate completedWorkEstimate // decision gets set to a `requestDecision` indicating what to do // with this request. It gets set exactly once, when the request // is removed from its queue. The value will be decisionReject, - // decisionCancel, or decisionExecute; decisionTryAnother never - // appears here. - decision promise.LockingWriteOnce + // decisionCancel, or decisionExecute. + // + // decision.Set is called with the queueSet locked. + // decision.Get is called without the queueSet locked. + decision promise.WriteOnce // arrivalTime is the real time when the request entered this system arrivalTime time.Time @@ -61,48 +60,91 @@ type request struct { // log messages descr1, descr2 interface{} - // Indicates whether client has called Request::Wait() - waitStarted bool - queueNoteFn fq.QueueNoteFn + // The preceding fields are filled in at creation and not modified since; + // the following fields may be modified later and must only be accessed while + // holding the queueSet's lock. + // Removes this request from its queue. If the request is not put into a // a queue it will be nil. - removeFromQueueFn removeFromFIFOFunc + removeFromQueueLocked removeFromFIFOFunc + + // arrivalR is R(arrivalTime). R is, confusingly, also called "virtual time". + // This field is meaningful only while the request is waiting in the virtual world. 
+ arrivalR fcrequest.SeatSeconds + + // startTime is the real time when the request began executing + startTime time.Time + + // Indicates whether client has called Request::Wait() + waitStarted bool } -// queue is an array of requests with additional metadata required for -// the FQScheduler +type completedWorkEstimate struct { + fcrequest.WorkEstimate + totalWork fcrequest.SeatSeconds // initial plus final work + finalWork fcrequest.SeatSeconds // only final work +} + +// queue is a sequence of requests that have arrived but not yet finished +// execution in both the real and virtual worlds. type queue struct { - // The requests are stored in a FIFO list. + // The requests not yet executing in the real world are stored in a FIFO list. requests fifo - // virtualStart is the virtual time (virtual seconds since process - // startup) when the oldest request in the queue (if there is any) - // started virtually executing - virtualStart float64 + // nextDispatchR is the R progress meter reading at + // which the next request will be dispatched in the virtual world. + nextDispatchR fcrequest.SeatSeconds + // requestsExecuting is the count in the real world. requestsExecuting int - index int + + // index is the position of this queue among those in its queueSet. + index int // seatsInUse is the total number of "seats" currently occupied // by all the requests that are currently executing in this queue. seatsInUse int } -// Enqueue enqueues a request into the queue and -// sets the removeFromQueueFn of the request appropriately. -func (q *queue) Enqueue(request *request) { - request.removeFromQueueFn = q.requests.Enqueue(request) +// queueSum tracks the sum of initial seats, max seats, and +// totalWork from all requests in a given queue +type queueSum struct { + // InitialSeatsSum is the sum of InitialSeats + // associated with all requests in a given queue. + InitialSeatsSum int + + // MaxSeatsSum is the sum of MaxSeats + // associated with all requests in a given queue. 
+ MaxSeatsSum int + + // TotalWorkSum is the sum of totalWork of the waiting requests + TotalWorkSum fcrequest.SeatSeconds } -// Dequeue dequeues a request from the queue -func (q *queue) Dequeue() (*request, bool) { - request, ok := q.requests.Dequeue() - return request, ok +func (req *request) totalWork() fcrequest.SeatSeconds { + return req.workEstimate.totalWork } -func (q *queue) dump(includeDetails bool) debug.QueueDump { +func (qs *queueSet) completeWorkEstimate(we *fcrequest.WorkEstimate) completedWorkEstimate { + finalWork := qs.computeFinalWork(we) + return completedWorkEstimate{ + WorkEstimate: *we, + totalWork: qs.computeInitialWork(we) + finalWork, + finalWork: finalWork, + } +} + +func (qs *queueSet) computeInitialWork(we *fcrequest.WorkEstimate) fcrequest.SeatSeconds { + return fcrequest.SeatsTimesDuration(float64(we.InitialSeats), qs.estimatedServiceDuration) +} + +func (qs *queueSet) computeFinalWork(we *fcrequest.WorkEstimate) fcrequest.SeatSeconds { + return fcrequest.SeatsTimesDuration(float64(we.FinalSeats), we.AdditionalLatency) +} + +func (q *queue) dumpLocked(includeDetails bool) debug.QueueDump { digest := make([]debug.RequestDump, q.requests.Length()) i := 0 q.requests.Walk(func(r *request) bool { @@ -111,6 +153,7 @@ func (q *queue) dump(includeDetails bool) debug.QueueDump { digest[i].FlowDistinguisher = r.flowDistinguisher digest[i].ArriveTime = r.arrivalTime digest[i].StartTime = r.startTime + digest[i].WorkEstimate = r.workEstimate.WorkEstimate if includeDetails { userInfo, _ := genericrequest.UserFrom(r.ctx) digest[i].UserName = userInfo.GetName() @@ -122,10 +165,19 @@ func (q *queue) dump(includeDetails bool) debug.QueueDump { i++ return true }) + + sum := q.requests.QueueSum() + queueSum := debug.QueueSum{ + InitialSeatsSum: sum.InitialSeatsSum, + MaxSeatsSum: sum.MaxSeatsSum, + TotalWorkSum: sum.TotalWorkSum.String(), + } + return debug.QueueDump{ - VirtualStart: q.virtualStart, + NextDispatchR: q.nextDispatchR.String(), Requests: digest, ExecutingRequests: q.requestsExecuting, SeatsInUse: q.seatsInUse, + QueueSum: queueSum, } } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go index d2c917e0b..2199c1412 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go @@ -21,7 +21,7 @@ import ( "encoding/json" "fmt" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" ) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go index a0d228523..88f812490 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -18,14 +18,15 @@ package metrics import ( "context" + "strconv" "strings" "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" basemetricstestutil "k8s.io/component-base/metrics/testutil" + "k8s.io/utils/clock" ) const ( @@ -103,7 +104,28 @@ var ( }, []string{priorityLevel, flowSchema}, ) - + // PriorityLevelExecutionSeatsObserverGenerator creates observers of seats occupied throughout execution for priority levels + PriorityLevelExecutionSeatsObserverGenerator = 
NewSampleAndWaterMarkHistogramsGenerator(clock.RealClock{}, time.Millisecond, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_seat_count_samples", + Help: "Periodic observations of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + ConstLabels: map[string]string{phase: "executing"}, + StabilityLevel: compbasemetrics.ALPHA, + }, + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "priority_level_seat_count_watermarks", + Help: "Watermarks of the number of requests", + Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, + ConstLabels: map[string]string{phase: "executing"}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel}, + ) // PriorityLevelConcurrencyObserverPairGenerator creates pairs that observe concurrency for priority levels PriorityLevelConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, &compbasemetrics.HistogramOpts{ @@ -122,8 +144,8 @@ var ( Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, StabilityLevel: compbasemetrics.ALPHA, }, - []string{priorityLevel}) - + []string{priorityLevel}, + ) // ReadWriteConcurrencyObserverPairGenerator creates pairs that observe concurrency broken down by mutating vs readonly ReadWriteConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond, &compbasemetrics.HistogramOpts{ @@ -142,8 +164,8 @@ var ( Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1}, StabilityLevel: compbasemetrics.ALPHA, }, - []string{requestKind}) - + []string{requestKind}, + ) apiserverCurrentR = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -154,7 +176,6 @@ var ( }, []string{priorityLevel}, ) - apiserverDispatchR = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -165,7 +186,6 @@ var ( }, []string{priorityLevel}, ) - apiserverLatestS = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -176,7 +196,6 @@ var ( }, []string{priorityLevel}, ) - apiserverNextSBounds = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -187,7 +206,6 @@ var ( }, []string{priorityLevel, "bound"}, ) - apiserverNextDiscountedSBounds = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -198,7 +216,6 @@ var ( }, []string{priorityLevel, "bound"}, ) - apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Namespace: namespace, @@ -235,7 +252,7 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "current_executing_requests", - Help: "Number of requests currently executing in the API Priority and Fairness system", + Help: "Number of requests in regular execution phase in the API Priority and Fairness system", StabilityLevel: compbasemetrics.ALPHA, }, []string{priorityLevel, flowSchema}, @@ -245,7 +262,7 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "request_concurrency_in_use", - Help: "Concurrency (number of seats) occupided by the currently executing requests in the API Priority and Fairness system", + Help: "Concurrency (number of seats) occupided by the currently executing (all phases count) requests in the API Priority and Fairness system", StabilityLevel: compbasemetrics.ALPHA, }, []string{priorityLevel, flowSchema}, @@ -266,12 +283,34 @@ var ( 
Namespace: namespace, Subsystem: subsystem, Name: "request_execution_seconds", - Help: "Duration of request execution in the API Priority and Fairness system", + Help: "Duration of regular phase of request execution in the API Priority and Fairness system", Buckets: requestDurationSecondsBuckets, StabilityLevel: compbasemetrics.ALPHA, }, []string{priorityLevel, flowSchema}, ) + watchCountSamples = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "watch_count_samples", + Help: "count of watchers for mutating requests in API Priority and Fairness", + Buckets: []float64{0, 1, 10, 100, 1000, 10000}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) + apiserverEpochAdvances = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "epoch_advance_total", + Help: "Number of times the queueset's progress meter jumped backward", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, "success"}, + ) + metrics = Registerables{ apiserverRejectedRequestsTotal, apiserverDispatchedRequestsTotal, @@ -287,7 +326,10 @@ var ( apiserverCurrentExecutingRequests, apiserverRequestWaitingSeconds, apiserverRequestExecutionSeconds, + watchCountSamples, + apiserverEpochAdvances, }. + Append(PriorityLevelExecutionSeatsObserverGenerator.metrics()...). Append(PriorityLevelConcurrencyObserverPairGenerator.metrics()...). Append(ReadWriteConcurrencyObserverPairGenerator.metrics()...) ) @@ -352,3 +394,13 @@ func ObserveWaitingDuration(ctx context.Context, priorityLevel, flowSchema, exec func ObserveExecutionDuration(ctx context.Context, priorityLevel, flowSchema string, executionTime time.Duration) { apiserverRequestExecutionSeconds.WithContext(ctx).WithLabelValues(priorityLevel, flowSchema).Observe(executionTime.Seconds()) } + +// ObserveWatchCount notes a sampling of a watch count +func ObserveWatchCount(ctx context.Context, priorityLevel, flowSchema string, count int) { + watchCountSamples.WithLabelValues(priorityLevel, flowSchema).Observe(float64(count)) +} + +// AddEpochAdvance notes an advance of the progress meter baseline for a given priority level +func AddEpochAdvance(ctx context.Context, priorityLevel string, success bool) { + apiserverEpochAdvances.WithContext(ctx).WithLabelValues(priorityLevel, strconv.FormatBool(success)).Inc() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/observer.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/observer.go new file mode 100644 index 000000000..1e55a0e1e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/observer.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +// Observer is something that can be given numeric observations. 
+type Observer interface { + // Observe takes an observation + Observe(float64) +} + +// ChangeObserver extends Observer with the ability to take +// an observation that is relative to the previous observation. +type ChangeObserver interface { + Observer + + // Observe a new value that differs by the given amount from the previous observation. + Add(float64) +} + +// RatioedChangeObserver tracks ratios. +// The numerator is set/changed through the ChangeObserver methods, +// and the denominator can be updated through the SetDenominator method. +// A ratio is tracked whenever the numerator is set/changed. +type RatioedChangeObserver interface { + ChangeObserver + + // SetDenominator sets the denominator to use until it is changed again + SetDenominator(float64) +} + +// RatioedChangeObserverGenerator creates related observers that are +// differentiated by a series of label values +type RatioedChangeObserverGenerator interface { + Generate(initialNumerator, initialDenominator float64, labelValues []string) RatioedChangeObserver +} + +// RatioedChangeObserverPair is a corresponding pair of observers, one for the +// number of requests waiting in queue(s) and one for the number of +// requests being executed +type RatioedChangeObserverPair struct { + // RequestsWaiting is given observations of the number of currently queued requests + RequestsWaiting RatioedChangeObserver + + // RequestsExecuting is given observations of the number of requests currently executing + RequestsExecuting RatioedChangeObserver +} + +// RatioedChangeObserverPairGenerator generates pairs +type RatioedChangeObserverPairGenerator interface { + Generate(initialWaitingDenominator, initialExecutingDenominator float64, labelValues []string) RatioedChangeObserverPair +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go index 43bd13adb..29366b536 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go @@ -20,9 +20,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -34,13 +34,13 @@ const ( labelValueExecuting = "executing" ) -// SampleAndWaterMarkPairGenerator makes pairs of TimedObservers that +// SampleAndWaterMarkPairGenerator makes pairs of RatioedChangeObservers that // track samples and watermarks. 
type SampleAndWaterMarkPairGenerator struct { urGenerator SampleAndWaterMarkObserverGenerator } -var _ TimedObserverPairGenerator = SampleAndWaterMarkPairGenerator{} +var _ RatioedChangeObserverPairGenerator = SampleAndWaterMarkPairGenerator{} // NewSampleAndWaterMarkHistogramsPairGenerator makes a new pair generator func NewSampleAndWaterMarkHistogramsPairGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkPairGenerator { @@ -50,10 +50,10 @@ func NewSampleAndWaterMarkHistogramsPairGenerator(clock clock.PassiveClock, samp } // Generate makes a new pair -func (spg SampleAndWaterMarkPairGenerator) Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair { - return TimedObserverPair{ - RequestsWaiting: spg.urGenerator.Generate(0, waiting1, append([]string{labelValueWaiting}, labelValues...)), - RequestsExecuting: spg.urGenerator.Generate(0, executing1, append([]string{labelValueExecuting}, labelValues...)), +func (spg SampleAndWaterMarkPairGenerator) Generate(initialWaitingDenominator, initialExecutingDenominator float64, labelValues []string) RatioedChangeObserverPair { + return RatioedChangeObserverPair{ + RequestsWaiting: spg.urGenerator.Generate(0, initialWaitingDenominator, append([]string{labelValueWaiting}, labelValues...)), + RequestsExecuting: spg.urGenerator.Generate(0, initialExecutingDenominator, append([]string{labelValueExecuting}, labelValues...)), } } @@ -61,7 +61,7 @@ func (spg SampleAndWaterMarkPairGenerator) metrics() Registerables { return spg.urGenerator.metrics() } -// SampleAndWaterMarkObserverGenerator creates TimedObservers that +// SampleAndWaterMarkObserverGenerator creates RatioedChangeObservers that // populate histograms of samples and low- and high-water-marks. The // generator has a samplePeriod, and the histograms get an observation // every samplePeriod. 
The sampling windows are quantized based on @@ -79,7 +79,7 @@ type sampleAndWaterMarkObserverGenerator struct { waterMarks *compbasemetrics.HistogramVec } -var _ TimedObserverGenerator = (*sampleAndWaterMarkObserverGenerator)(nil) +var _ RatioedChangeObserverGenerator = SampleAndWaterMarkObserverGenerator{} // NewSampleAndWaterMarkHistogramsGenerator makes a new one func NewSampleAndWaterMarkHistogramsGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkObserverGenerator { @@ -97,23 +97,23 @@ func (swg *sampleAndWaterMarkObserverGenerator) quantize(when time.Time) int64 { return int64(when.Sub(swg.t0) / swg.samplePeriod) } -// Generate makes a new TimedObserver -func (swg *sampleAndWaterMarkObserverGenerator) Generate(x, x1 float64, labelValues []string) TimedObserver { - relX := x / x1 +// Generate makes a new RatioedChangeObserver +func (swg *sampleAndWaterMarkObserverGenerator) Generate(initialNumerator, initialDenominator float64, labelValues []string) RatioedChangeObserver { + ratio := initialNumerator / initialDenominator when := swg.clock.Now() return &sampleAndWaterMarkHistograms{ sampleAndWaterMarkObserverGenerator: swg, labelValues: labelValues, loLabelValues: append([]string{labelValueLo}, labelValues...), hiLabelValues: append([]string{labelValueHi}, labelValues...), - x1: x1, + denominator: initialDenominator, sampleAndWaterMarkAccumulator: sampleAndWaterMarkAccumulator{ lastSet: when, lastSetInt: swg.quantize(when), - x: x, - relX: relX, - loRelX: relX, - hiRelX: relX, + numerator: initialNumerator, + ratio: ratio, + loRatio: ratio, + hiRatio: ratio, }} } @@ -127,39 +127,39 @@ type sampleAndWaterMarkHistograms struct { loLabelValues, hiLabelValues []string sync.Mutex - x1 float64 + denominator float64 sampleAndWaterMarkAccumulator } type sampleAndWaterMarkAccumulator struct { - lastSet time.Time - lastSetInt int64 // lastSet / samplePeriod - x float64 - relX float64 // x / x1 - loRelX, hiRelX float64 + lastSet time.Time + lastSetInt int64 // lastSet / samplePeriod + numerator float64 + ratio float64 // numerator/denominator + loRatio, hiRatio float64 } -var _ TimedObserver = (*sampleAndWaterMarkHistograms)(nil) +var _ RatioedChangeObserver = (*sampleAndWaterMarkHistograms)(nil) -func (saw *sampleAndWaterMarkHistograms) Add(deltaX float64) { +func (saw *sampleAndWaterMarkHistograms) Add(deltaNumerator float64) { saw.innerSet(func() { - saw.x += deltaX + saw.numerator += deltaNumerator }) } -func (saw *sampleAndWaterMarkHistograms) Set(x float64) { +func (saw *sampleAndWaterMarkHistograms) Observe(numerator float64) { saw.innerSet(func() { - saw.x = x + saw.numerator = numerator }) } -func (saw *sampleAndWaterMarkHistograms) SetX1(x1 float64) { +func (saw *sampleAndWaterMarkHistograms) SetDenominator(denominator float64) { saw.innerSet(func() { - saw.x1 = x1 + saw.denominator = denominator }) } -func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { +func (saw *sampleAndWaterMarkHistograms) innerSet(updateNumeratorOrDenominator func()) { when, whenInt, acc, wellOrdered := func() (time.Time, int64, sampleAndWaterMarkAccumulator, bool) { saw.Lock() defer saw.Unlock() @@ -168,11 +168,11 @@ func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { whenInt := saw.quantize(when) acc := saw.sampleAndWaterMarkAccumulator wellOrdered := !when.Before(acc.lastSet) - updateXOrX1() - saw.relX = saw.x / saw.x1 + updateNumeratorOrDenominator() + saw.ratio 
= saw.numerator / saw.denominator if wellOrdered { if acc.lastSetInt < whenInt { - saw.loRelX, saw.hiRelX = acc.relX, acc.relX + saw.loRatio, saw.hiRatio = acc.ratio, acc.ratio saw.lastSetInt = whenInt } saw.lastSet = when @@ -187,10 +187,10 @@ func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { // would be wrong to update `saw.lastSet` in this case because // that plants a time bomb for future updates to // `saw.lastSetInt`. - if saw.relX < saw.loRelX { - saw.loRelX = saw.relX - } else if saw.relX > saw.hiRelX { - saw.hiRelX = saw.relX + if saw.ratio < saw.loRatio { + saw.loRatio = saw.ratio + } else if saw.ratio > saw.hiRatio { + saw.hiRatio = saw.ratio } return when, whenInt, acc, wellOrdered }() @@ -200,10 +200,10 @@ func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) { klog.Errorf("Time went backwards from %s to %s for labelValues=%#+v", lastSetS, whenS, saw.labelValues) } for acc.lastSetInt < whenInt { - saw.samples.WithLabelValues(saw.labelValues...).Observe(acc.relX) - saw.waterMarks.WithLabelValues(saw.loLabelValues...).Observe(acc.loRelX) - saw.waterMarks.WithLabelValues(saw.hiLabelValues...).Observe(acc.hiRelX) + saw.samples.WithLabelValues(saw.labelValues...).Observe(acc.ratio) + saw.waterMarks.WithLabelValues(saw.loLabelValues...).Observe(acc.loRatio) + saw.waterMarks.WithLabelValues(saw.hiLabelValues...).Observe(acc.hiRatio) acc.lastSetInt++ - acc.loRelX, acc.hiRelX = acc.relX, acc.relX + acc.loRatio, acc.hiRatio = acc.ratio, acc.ratio } } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go deleted file mode 100644 index 25f41493c..000000000 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -// TimedObserver gets informed about the values assigned to a variable -// `X float64` over time, and reports on the ratio `X/X1`. 
-type TimedObserver interface { - // Add notes a change to the variable - Add(deltaX float64) - - // Set notes a setting of the variable - Set(x float64) - - // SetX1 changes the value to use for X1 - SetX1(x1 float64) -} - -// TimedObserverGenerator creates related observers that are -// differentiated by a series of label values -type TimedObserverGenerator interface { - Generate(x, x1 float64, labelValues []string) TimedObserver -} - -// TimedObserverPair is a corresponding pair of observers, one for the -// number of requests waiting in queue(s) and one for the number of -// requests being executed -type TimedObserverPair struct { - // RequestsWaiting is given observations of the number of currently queued requests - RequestsWaiting TimedObserver - - // RequestsExecuting is given observations of the number of requests currently executing - RequestsExecuting TimedObserver -} - -// TimedObserverPairGenerator generates pairs -type TimedObserverPairGenerator interface { - Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair -} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go new file mode 100644 index 000000000..7fcc0903e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -0,0 +1,136 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "math" + "net/http" + "net/url" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog/v2" +) + +func newListWorkEstimator(countFn objectCountGetterFunc) WorkEstimatorFunc { + estimator := &listWorkEstimator{ + countGetterFn: countFn, + } + return estimator.estimate +} + +type listWorkEstimator struct { + countGetterFn objectCountGetterFunc +} + +func (e *listWorkEstimator) estimate(r *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate { + requestInfo, ok := apirequest.RequestInfoFrom(r.Context()) + if !ok { + // no RequestInfo should never happen, but to be on the safe side + // let's return maximumSeats + return WorkEstimate{InitialSeats: maximumSeats} + } + + query := r.URL.Query() + listOptions := metav1.ListOptions{} + if err := metav1.Convert_url_Values_To_v1_ListOptions(&query, &listOptions, nil); err != nil { + klog.ErrorS(err, "Failed to convert options while estimating work for the list request") + + // This request is destined to fail in the validation layer, + // return maximumSeats for this request to be consistent. 
+ return WorkEstimate{InitialSeats: maximumSeats} + } + isListFromCache := !shouldListFromStorage(query, &listOptions) + + numStored, err := e.countGetterFn(key(requestInfo)) + switch { + case err == ObjectCountStaleErr: + // object count going stale is indicative of degradation, so we should + // be conservative here and allocate maximum seats to this list request. + // NOTE: if a CRD is removed, its count will go stale first and then the + // pruner will eventually remove the CRD from the cache. + return WorkEstimate{InitialSeats: maximumSeats} + case err == ObjectCountNotFoundErr: + // there are two scenarios in which we can see this error: + // a. the type is truly unknown, a typo on the caller's part. + // b. the count has gone stale for too long and the pruner + // has removed the type from the cache. + // we don't have a way to distinguish between a and b. b seems to indicate + // to a more severe case of degradation, although b can naturally trigger + // when a CRD is removed. let's be conservative and allocate maximum seats. + return WorkEstimate{InitialSeats: maximumSeats} + case err != nil: + // we should never be here since Get returns either ObjectCountStaleErr or + // ObjectCountNotFoundErr, return maximumSeats to be on the safe side. + return WorkEstimate{InitialSeats: maximumSeats} + } + + limit := numStored + if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) && listOptions.Limit > 0 && + listOptions.Limit < numStored { + limit = listOptions.Limit + } + + var estimatedObjectsToBeProcessed int64 + + switch { + case isListFromCache: + // TODO: For resources that implement indexes at the watchcache level, + // we need to adjust the cost accordingly + estimatedObjectsToBeProcessed = numStored + case listOptions.FieldSelector != "" || listOptions.LabelSelector != "": + estimatedObjectsToBeProcessed = numStored + limit + default: + estimatedObjectsToBeProcessed = 2 * limit + } + + // for now, our rough estimate is to allocate one seat to each 100 obejcts that + // will be processed by the list request. + // we will come up with a different formula for the transformation function and/or + // fine tune this number in future iteratons. 
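To make the "one seat per 100 objects" rule described above concrete before the computation on the next line, here is a small self-contained paraphrase. seatsForObjects is a hypothetical helper name, and the clamp bounds are the minimumSeats=1 / maximumSeats=10 constants from width.go in this patch; the object counts are illustrative.

    package main

    import (
    	"fmt"
    	"math"
    )

    // seatsForObjects paraphrases the computation below: one seat per 100 objects
    // expected to be processed, clamped to the [minimumSeats, maximumSeats] = [1, 10] range.
    func seatsForObjects(estimatedObjects int64) uint {
    	seats := uint(math.Ceil(float64(estimatedObjects) / 100))
    	if seats < 1 {
    		seats = 1
    	}
    	if seats > 10 {
    		seats = 10
    	}
    	return seats
    }

    func main() {
    	fmt.Println(seatsForObjects(0))    // 1: even a list over an empty collection occupies a seat
    	fmt.Println(seatsForObjects(350))  // 4: e.g. a cache-served list of 350 stored objects
    	fmt.Println(seatsForObjects(5000)) // 10: capped at maximumSeats
    }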
+ seats := uint(math.Ceil(float64(estimatedObjectsToBeProcessed) / float64(100))) + + // make sure we never return a seat of zero + if seats < minimumSeats { + seats = minimumSeats + } + if seats > maximumSeats { + seats = maximumSeats + } + return WorkEstimate{InitialSeats: seats} +} + +func key(requestInfo *apirequest.RequestInfo) string { + groupResource := &schema.GroupResource{ + Group: requestInfo.APIGroup, + Resource: requestInfo.Resource, + } + return groupResource.String() +} + +// NOTICE: Keep in sync with shouldDelegateList function in +// staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { + resourceVersion := opts.ResourceVersion + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) + hasContinuation := pagingEnabled && len(opts.Continue) > 0 + hasLimit := pagingEnabled && opts.Limit > 0 && resourceVersion != "0" + return resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/mutating_work_estimator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/mutating_work_estimator.go new file mode 100644 index 000000000..1c6c441e2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/mutating_work_estimator.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "math" + "net/http" + "time" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/flowcontrol/metrics" +) + +const ( + watchesPerSeat = 10.0 + eventAdditionalDuration = 5 * time.Millisecond + // TODO(wojtekt): Remove it once we tune the algorithm to not fail + // scalability tests. + enableMutatingWorkEstimator = false +) + +func newMutatingWorkEstimator(countFn watchCountGetterFunc) WorkEstimatorFunc { + return newTestMutatingWorkEstimator(countFn, enableMutatingWorkEstimator) +} + +func newTestMutatingWorkEstimator(countFn watchCountGetterFunc, enabled bool) WorkEstimatorFunc { + estimator := &mutatingWorkEstimator{ + countFn: countFn, + enabled: enabled, + } + return estimator.estimate +} + +type mutatingWorkEstimator struct { + countFn watchCountGetterFunc + enabled bool +} + +func (e *mutatingWorkEstimator) estimate(r *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate { + // TODO(wojtekt): Remove once we tune the algorithm to not fail + // scalability tests. + if !e.enabled { + return WorkEstimate{ + InitialSeats: 1, + } + } + + requestInfo, ok := apirequest.RequestInfoFrom(r.Context()) + if !ok { + // no RequestInfo should never happen, but to be on the safe side + // let's return a large value. 
+ return WorkEstimate{ + InitialSeats: 1, + FinalSeats: maximumSeats, + AdditionalLatency: eventAdditionalDuration, + } + } + watchCount := e.countFn(requestInfo) + metrics.ObserveWatchCount(r.Context(), priorityLevelName, flowSchemaName, watchCount) + + // The cost of the request associated with the watchers of that event + // consists of three parts: + // - cost of going through the event change logic + // - cost of serialization of the event + // - cost of processing an event object for each watcher (e.g. filtering, + // sending data over network) + // We're starting simple to get some operational experience with it and + // we will work on tuning the algorithm later. Given that the actual work + // associated with processing watch events is happening in multiple + // goroutines (proportional to the number of watchers) that are all + // resumed at once, as a starting point we assume that each such goroutine + // is taking 1/Nth of a seat for M milliseconds. + // We allow the accounting of that work in P&F to be reshaped into another + // rectangle of equal area for practical reasons. + var finalSeats uint + var additionalLatency time.Duration + + // TODO: Make this unconditional after we tune the algorithm better. + // Technically, there is an overhead connected to processing an event after + // the request finishes even if there is a small number of watches. + // However, until we tune the estimation we want to stay on the safe side + // an avoid introducing additional latency for almost every single request. + if watchCount >= watchesPerSeat { + // TODO: As described in the KEP, we should take into account that not all + // events are equal and try to estimate the cost of a single event based on + // some historical data about size of events. + finalSeats = uint(math.Ceil(float64(watchCount) / watchesPerSeat)) + finalWork := SeatsTimesDuration(float64(finalSeats), eventAdditionalDuration) + + // While processing individual events is highly parallel, + // the design/implementation of P&F has a couple limitations that + // make using this assumption in the P&F implementation very + // inefficient because: + // - we reserve max(initialSeats, finalSeats) for time of executing + // both phases of the request + // - even more importantly, when a given `wide` request is the one to + // be dispatched, we are not dispatching any other request until + // we accumulate enough seats to dispatch the nominated one, even + // if currently unoccupied seats would allow for dispatching some + // other requests in the meantime + // As a consequence of these, the wider the request, the more capacity + // will effectively be blocked and unused during dispatching and + // executing this request. + // + // To mitigate the impact of it, we're capping the maximum number of + // seats that can be assigned to a given request. Thanks to it: + // 1) we reduce the amount of seat-seconds that are "wasted" during + // dispatching and executing initial phase of the request + // 2) we are not changing the finalWork estimate - just potentially + // reshaping it to be narrower and longer. As long as the maximum + // seats setting will prevent dispatching too many requests at once + // to prevent overloading kube-apiserver (and/or etcd or the VM or + // a physical machine it is running on), we believe the relaxed + // version should be good enough to achieve the P&F goals. + // + // TODO: Confirm that the current cap of maximumSeats allow us to + // achieve the above. 
+ if finalSeats > maximumSeats { + finalSeats = maximumSeats + } + additionalLatency = finalWork.DurationPerSeat(float64(finalSeats)) + } + + return WorkEstimate{ + InitialSeats: 1, + FinalSeats: finalSeats, + AdditionalLatency: additionalLatency, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/object_count_tracker.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/object_count_tracker.go new file mode 100644 index 000000000..dd1d5e570 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/object_count_tracker.go @@ -0,0 +1,166 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "errors" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +const ( + // type deletion (it applies mostly to CRD) is not a very frequent + // operation so we can afford to prune the cache at a large interval. + // at the same time, we also want to make sure that the scalability + // tests hit this code path. + pruneInterval = 1 * time.Hour + + // the storage layer polls for object count at every 1m interval, we will allow + // up to 2-3 transient failures to get the latest count for a given resource. + staleTolerationThreshold = 3 * time.Minute +) + +var ( + // ObjectCountNotFoundErr is returned when the object count for + // a given resource is not being tracked. + ObjectCountNotFoundErr = errors.New("object count not found for the given resource") + + // ObjectCountStaleErr is returned when the object count for a + // given resource has gone stale due to transient failures. + ObjectCountStaleErr = errors.New("object count has gone stale for the given resource") +) + +// StorageObjectCountTracker is an interface that is used to keep track of +// of the total number of objects for each resource. +// {group}.{resource} is used as the key name to update and retrieve +// the total number of objects for a given resource. +type StorageObjectCountTracker interface { + // Set is invoked to update the current number of total + // objects for the given resource + Set(string, int64) + + // Get returns the total number of objects for the given resource. + // The following errors are returned: + // - if the count has gone stale for a given resource due to transient + // failures ObjectCountStaleErr is returned. + // - if the given resource is not being tracked then + // ObjectCountNotFoundErr is returned. + Get(string) (int64, error) +} + +// NewStorageObjectCountTracker returns an instance of +// StorageObjectCountTracker interface that can be used to +// keep track of the total number of objects for each resource. 
+func NewStorageObjectCountTracker(stopCh <-chan struct{}) StorageObjectCountTracker { + tracker := &objectCountTracker{ + clock: &clock.RealClock{}, + counts: map[string]*timestampedCount{}, + } + go func() { + wait.PollUntil( + pruneInterval, + func() (bool, error) { + // always prune at every pruneInterval + return false, tracker.prune(pruneInterval) + }, stopCh) + klog.InfoS("StorageObjectCountTracker pruner is exiting") + }() + + return tracker +} + +// timestampedCount stores the count of a given resource with a last updated +// timestamp so we can prune it after it goes stale for certain threshold. +type timestampedCount struct { + count int64 + lastUpdatedAt time.Time +} + +// objectCountTracker implements StorageObjectCountTracker with +// reader/writer mutual exclusion lock. +type objectCountTracker struct { + clock clock.PassiveClock + + lock sync.RWMutex + counts map[string]*timestampedCount +} + +func (t *objectCountTracker) Set(groupResource string, count int64) { + if count <= -1 { + // a value of -1 indicates that the 'Count' call failed to contact + // the storage layer, in most cases this error can be transient. + // we will continue to work with the count that is in the cache + // up to a certain threshold defined by staleTolerationThreshold. + // in case this becomes a non-transient error then the count for + // the given resource will eventually be removed from + // the cache by the pruner. + return + } + + now := t.clock.Now() + + // lock for writing + t.lock.Lock() + defer t.lock.Unlock() + + if item, ok := t.counts[groupResource]; ok { + item.count = count + item.lastUpdatedAt = now + return + } + + t.counts[groupResource] = &timestampedCount{ + count: count, + lastUpdatedAt: now, + } +} + +func (t *objectCountTracker) Get(groupResource string) (int64, error) { + staleThreshold := t.clock.Now().Add(-staleTolerationThreshold) + + t.lock.RLock() + defer t.lock.RUnlock() + + if item, ok := t.counts[groupResource]; ok { + if item.lastUpdatedAt.Before(staleThreshold) { + return item.count, ObjectCountStaleErr + } + return item.count, nil + } + return 0, ObjectCountNotFoundErr +} + +func (t *objectCountTracker) prune(threshold time.Duration) error { + oldestLastUpdatedAtAllowed := t.clock.Now().Add(-threshold) + + // lock for writing + t.lock.Lock() + defer t.lock.Unlock() + + for groupResource, count := range t.counts { + if count.lastUpdatedAt.After(oldestLastUpdatedAtAllowed) { + continue + } + delete(t.counts, groupResource) + } + + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go new file mode 100644 index 000000000..e3a401745 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go @@ -0,0 +1,65 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package request + +import ( + "fmt" + "math" + "time" +) + +// SeatSeconds is a measure of work, in units of seat-seconds, using a fixed-point representation. +// `SeatSeconds(n)` represents `n/ssScale` seat-seconds. +// The `ssScale` constant is private to the implementation here, +// no other code should use it. +type SeatSeconds uint64 + +// MaxSeatSeconds is the maximum representable value of SeatSeconds +const MaxSeatSeconds = SeatSeconds(math.MaxUint64) + +// MinSeatSeconds is the lowest representable value of SeatSeconds +const MinSeatSeconds = SeatSeconds(0) + +// SeatsTimesDuration produces the SeatSeconds value for the given factors. +// This is intended only to produce small values, increments in work +// rather than amount of work done since process start. +func SeatsTimesDuration(seats float64, duration time.Duration) SeatSeconds { + return SeatSeconds(math.Round(seats * float64(duration/time.Nanosecond) / (1e9 / ssScale))) +} + +// ToFloat converts to a floating-point representation. +// This conversion may lose precision. +func (ss SeatSeconds) ToFloat() float64 { + return float64(ss) / ssScale +} + +// DurationPerSeat returns duration per seat. +// This division may lose precision. +func (ss SeatSeconds) DurationPerSeat(seats float64) time.Duration { + return time.Duration(float64(ss) / seats * (float64(time.Second) / ssScale)) +} + +// String converts to a string. +// This is suitable for large as well as small values. +func (ss SeatSeconds) String() string { + const div = SeatSeconds(ssScale) + quo := ss / div + rem := ss - quo*div + return fmt.Sprintf("%d.%08dss", quo, rem) +} + +const ssScale = 1e8 diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go index c5fa478a7..675433c2c 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/width.go @@ -17,31 +17,99 @@ limitations under the License. package request import ( + "fmt" "net/http" + "time" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/klog/v2" ) -type Width struct { - // Seats represents the number of seats associated with this request - Seats uint +const ( + // the minimum number of seats a request must occupy + minimumSeats = 1 + + // the maximum number of seats a request can occupy + maximumSeats = 10 +) + +// WorkEstimate carries three of the four parameters that determine the work in a request. +// The fourth parameter is the duration of the initial phase of execution. +type WorkEstimate struct { + // InitialSeats is the number of seats occupied while the server is + // executing this request. + InitialSeats uint + + // FinalSeats is the number of seats occupied at the end, + // during the AdditionalLatency. + FinalSeats uint + + // AdditionalLatency specifies the additional duration the seats allocated + // to this request must be reserved after the given request has finished. + // AdditionalLatency should not have any impact on the user experience, the + // caller must not experience this additional latency. + AdditionalLatency time.Duration } -// DefaultWidthEstimator returns returns '1' as the "width" -// of the given request. -// -// TODO: when we plumb in actual "width" handling for different -// type of request(s) this function will iterate through a chain -// of widthEstimator instance(s).
-func DefaultWidthEstimator(_ *http.Request) Width { - return Width{ - Seats: 1, +// MaxSeats returns the maximum number of seats the request occupies over the +// phases of being served. +func (we *WorkEstimate) MaxSeats() int { + if we.InitialSeats >= we.FinalSeats { + return int(we.InitialSeats) } + + return int(we.FinalSeats) } -// WidthEstimatorFunc returns the estimated "width" of a given request. +// objectCountGetterFunc represents a function that gets the total +// number of objects for a given resource. +type objectCountGetterFunc func(string) (int64, error) + +// watchCountGetterFunc represents a function that gets the total +// number of watchers potentially interested in a given request. +type watchCountGetterFunc func(*apirequest.RequestInfo) int + +// NewWorkEstimator estimates the work that will be done by a given request, +// if no WorkEstimatorFunc matches the given request then the default +// work estimate of 1 seat is allocated to the request. +func NewWorkEstimator(objectCountFn objectCountGetterFunc, watchCountFn watchCountGetterFunc) WorkEstimatorFunc { + estimator := &workEstimator{ + listWorkEstimator: newListWorkEstimator(objectCountFn), + mutatingWorkEstimator: newMutatingWorkEstimator(watchCountFn), + } + return estimator.estimate +} + +// WorkEstimatorFunc returns the estimated work of a given request. // This function will be used by the Priority & Fairness filter to -// estimate the "width" of incoming requests. -type WidthEstimatorFunc func(*http.Request) Width +// estimate the work of incoming requests. +type WorkEstimatorFunc func(request *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate -func (e WidthEstimatorFunc) EstimateWidth(r *http.Request) Width { - return e(r) +func (e WorkEstimatorFunc) EstimateWork(r *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate { + return e(r, flowSchemaName, priorityLevelName) +} + +type workEstimator struct { + // listWorkEstimator estimates work for list request(s) + listWorkEstimator WorkEstimatorFunc + // mutatingWorkEstimator calculates the width of mutating request(s) + mutatingWorkEstimator WorkEstimatorFunc +} + +func (e *workEstimator) estimate(r *http.Request, flowSchemaName, priorityLevelName string) WorkEstimate { + requestInfo, ok := apirequest.RequestInfoFrom(r.Context()) + if !ok { + klog.ErrorS(fmt.Errorf("no RequestInfo found in context"), "Failed to estimate work for the request", "URI", r.RequestURI) + // no RequestInfo should never happen, but to be on the safe side let's return maximumSeats + return WorkEstimate{InitialSeats: maximumSeats} + } + + switch requestInfo.Verb { + case "list": + return e.listWorkEstimator.EstimateWork(r, flowSchemaName, priorityLevelName) + case "create", "update", "patch", "delete": + return e.mutatingWorkEstimator.EstimateWork(r, flowSchemaName, priorityLevelName) + } + + return WorkEstimate{InitialSeats: minimumSeats} } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go index 765e28790..83380e374 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go @@ -19,7 +19,7 @@ package flowcontrol import ( "strings" - flowcontrol "k8s.io/api/flowcontrol/v1beta1" + flowcontrol "k8s.io/api/flowcontrol/v1beta2" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" diff --git
a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go index 4dc7c131e..e4ebb2145 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go @@ -17,10 +17,16 @@ limitations under the License. package flowcontrol import ( + "net/http" "sync" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/endpoints/request" + + "k8s.io/klog/v2" ) // readOnlyVerbs contains verbs for read-only requests. @@ -51,10 +57,10 @@ type ForgetWatchFunc func() // of watches in the system for the purpose of estimating the // cost of incoming mutating requests. type WatchTracker interface { - // RegisterWatch reqisters a watch with the provided requestInfo + // RegisterWatch registers a watch based on the provided http.Request // in the tracker. It returns the function that should be called // to forget the watcher once it is finished. - RegisterWatch(requestInfo *request.RequestInfo) ForgetWatchFunc + RegisterWatch(r *http.Request) ForgetWatchFunc // GetInterestedWatchCount returns the number of watches that are // potentially interested in a request with a given RequestInfo @@ -62,26 +68,81 @@ type WatchTracker interface { GetInterestedWatchCount(requestInfo *request.RequestInfo) int } +// builtinIndexes represents a set of indexes registered in +// watchcache that are indexing watches and increase speed of +// their processing. +// We define the indexes as a map from a resource to the path +// to the field in the object on which the index is built. +type builtinIndexes map[string]string + +func getBuiltinIndexes() builtinIndexes { + // The only existing indexes as of now are: + // - spec.nodeName for pods + // - metadata.Name for nodes, secrets and configmaps + // However, we can ignore the latter, because the requestInfo.Name + // is set for them (i.e. we already catch them correctly). + return map[string]string{ + "pods": "spec.nodeName", + } +} + // watchTracker tracks the number of watches in the system for // the purpose of estimating the cost of incoming mutating requests. type watchTracker struct { - lock sync.Mutex + // indexes represents a set of registered indexes. + // It can't change after creation. + indexes builtinIndexes + lock sync.Mutex watchCount map[watchIdentifier]int } func NewWatchTracker() WatchTracker { return &watchTracker{ + indexes: getBuiltinIndexes(), watchCount: make(map[watchIdentifier]int), } } +const ( + unsetValue = "" +) + +func getIndexValue(r *http.Request, field string) string { + opts := metainternalversion.ListOptions{} + if err := scheme.ParameterCodec.DecodeParameters(r.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + klog.Warningf("Couldn't parse list options for %v: %v", r.URL.Query(), err) + return unsetValue + } + if opts.FieldSelector == nil { + return unsetValue + } + if value, ok := opts.FieldSelector.RequiresExactMatch(field); ok { + return value + } + return unsetValue +} + +type indexValue struct { + resource string + value string +} + // RegisterWatch implements WatchTracker interface.
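+// For resources with a built-in index (currently pods, indexed by
+// spec.nodeName), the watch is additionally classified by the value its
+// field selector requires for the index field; updateIndexLocked decides
+// how such indexed watches are counted.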
-func (w *watchTracker) RegisterWatch(requestInfo *request.RequestInfo) ForgetWatchFunc { - if requestInfo == nil || requestInfo.Verb != "watch" { +func (w *watchTracker) RegisterWatch(r *http.Request) ForgetWatchFunc { + requestInfo, ok := request.RequestInfoFrom(r.Context()) + if !ok || requestInfo == nil || requestInfo.Verb != "watch" { return nil } + var index *indexValue + if indexField, ok := w.indexes[requestInfo.Resource]; ok { + index = &indexValue{ + resource: requestInfo.Resource, + value: getIndexValue(r, indexField), + } + } + identifier := &watchIdentifier{ apiGroup: requestInfo.APIGroup, resource: requestInfo.Resource, @@ -91,16 +152,40 @@ func (w *watchTracker) RegisterWatch(requestInfo *request.RequestInfo) ForgetWat w.lock.Lock() defer w.lock.Unlock() - w.watchCount[*identifier]++ - return w.forgetWatch(identifier) + w.updateIndexLocked(identifier, index, 1) + return w.forgetWatch(identifier, index) } -func (w *watchTracker) forgetWatch(identifier *watchIdentifier) ForgetWatchFunc { +func (w *watchTracker) updateIndexLocked(identifier *watchIdentifier, index *indexValue, incr int) { + if index == nil { + w.watchCount[*identifier] += incr + } else { + // For resources with defined index, for a given watch event we are + // only processing the watchers that: + // (a) do not specify field selector for an index field + // (b) do specify field selector with the value equal to the value + // coming from the processed object + // + // TODO(wojtek-t): For the sake of making progress and initially + // simplifying the implementation, we approximate (b) for all values + // as the value for an empty string. The assumption we're making here + // is that the difference between the actual number of watchers that + // will be processed, i.e. (a)+(b) above and the one from our + // approximation i.e. (a)+[(b) for field value of ""] will be small. + // This seems to be true in almost all production clusters, which makes + // it a reasonable first step simplification to unblock progress on it. + if index.value == unsetValue || index.value == "" { + w.watchCount[*identifier] += incr + } + } +} + +func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValue) ForgetWatchFunc { return func() { w.lock.Lock() defer w.lock.Unlock() - w.watchCount[*identifier]-- + w.updateIndexLocked(identifier, index, -1) if w.watchCount[*identifier] == 0 { delete(w.watchCount, *identifier) } diff --git a/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go b/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go new file mode 100644 index 000000000..693821ac0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/openapi/enablement.go @@ -0,0 +1,83 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package openapi + +import ( + "strings" + + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/schemamutation" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// enumTypeDescriptionHeader is the header of enum section in schema description. +const enumTypeDescriptionHeader = "Possible enum values:" + +// GetOpenAPIDefinitionsWithoutDisabledFeatures wraps a GetOpenAPIDefinitions to revert +// any change to the schema that was made by disabled features. +func GetOpenAPIDefinitionsWithoutDisabledFeatures(GetOpenAPIDefinitions common.GetOpenAPIDefinitions) common.GetOpenAPIDefinitions { + return func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + defs := GetOpenAPIDefinitions(ref) + restoreDefinitions(defs) + return defs + } +} + +// restoreDefinitions restores any changes by disabled features from definition map. +func restoreDefinitions(defs map[string]common.OpenAPIDefinition) { + // revert changes from OpenAPIEnums + if !utilfeature.DefaultFeatureGate.Enabled(genericfeatures.OpenAPIEnums) { + for gvk, def := range defs { + orig := &def.Schema + if ret := pruneEnums(orig); ret != orig { + def.Schema = *ret + defs[gvk] = def + } + } + } +} + +func pruneEnums(schema *spec.Schema) *spec.Schema { + walker := schemamutation.Walker{ + SchemaCallback: func(schema *spec.Schema) *spec.Schema { + orig := schema + clone := func() { + if orig == schema { // if schema has not been mutated yet + schema = new(spec.Schema) + *schema = *orig // make a clone from orig to schema + } + } + if headerIndex := strings.Index(schema.Description, enumTypeDescriptionHeader); headerIndex != -1 { + // remove the enum section from description. + // note that the new lines before the header should be removed too, + // thus the slice range. + clone() + schema.Description = schema.Description[:headerIndex] + } + if len(schema.Enum) != 0 { + // remove the enum field + clone() + schema.Enum = nil + } + return schema + }, + RefCallback: schemamutation.RefCallbackNoop, + } + return walker.WalkSchema(schema) +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go index 9c6f780e2..3c5d39ae3 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go @@ -28,6 +28,7 @@ import ( type ErrCallingWebhook struct { WebhookName string Reason error + Status *apierrors.StatusError } func (e *ErrCallingWebhook) Error() string { diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 000000000..ba01d5d3c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index ade14180b..ee0ed40a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -27,15 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -120,3 +121,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *St b.MinReadySeconds = &value return b } + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 000000000..0048724c0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index befd1f7e0..886433d9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -27,15 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -120,3 +121,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *St b.MinReadySeconds = &value return b } + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go new file mode 100644 index 000000000..aee27803d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" +) + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// with apply. +type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { + WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` +} + +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// apply. +func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} +} + +// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenDeleted field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenDeleted = &value + return b +} + +// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WhenScaled field is set to the value of the last call. +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { + b.WhenScaled = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index 44044be26..08922cea5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -27,15 +27,16 @@ import ( // StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` - VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` - ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` - UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` } // StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with @@ -120,3 +121,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *St b.MinReadySeconds = &value return b } + +// WithPersistentVolumeClaimRetentionPolicy sets the PersistentVolumeClaimRetentionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolumeClaimRetentionPolicy field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPolicy(value *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.PersistentVolumeClaimRetentionPolicy = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go new file mode 100644 index 000000000..15ef216d1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// with apply. +type ContainerResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` + Container *string `json:"container,omitempty"` +} + +// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// apply. +func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { + return &ContainerResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *ContainerResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ContainerResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} + +// WithContainer sets the Container field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Container field is set to the value of the last call. +func (b *ContainerResourceMetricSourceApplyConfiguration) WithContainer(value string) *ContainerResourceMetricSourceApplyConfiguration { + b.Container = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go new file mode 100644 index 000000000..34213bca3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// with apply. 
+type ContainerResourceMetricStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` + Container *string `json:"container,omitempty"` +} + +// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// apply. +func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { + return &ContainerResourceMetricStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithName(value v1.ResourceName) *ContainerResourceMetricStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ContainerResourceMetricStatusApplyConfiguration { + b.Current = value + return b +} + +// WithContainer sets the Container field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Container field is set to the value of the last call. +func (b *ContainerResourceMetricStatusApplyConfiguration) WithContainer(value string) *ContainerResourceMetricStatusApplyConfiguration { + b.Container = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go new file mode 100644 index 000000000..19045706d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// with apply. +type CrossVersionObjectReferenceApplyConfiguration struct { + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + APIVersion *string `json:"apiVersion,omitempty"` +} + +// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// apply. 
+func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { + return &CrossVersionObjectReferenceApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CrossVersionObjectReferenceApplyConfiguration) WithKind(value string) *CrossVersionObjectReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CrossVersionObjectReferenceApplyConfiguration) WithName(value string) *CrossVersionObjectReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CrossVersionObjectReferenceApplyConfiguration) WithAPIVersion(value string) *CrossVersionObjectReferenceApplyConfiguration { + b.APIVersion = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go new file mode 100644 index 000000000..11a8eff26 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// with apply. +type ExternalMetricSourceApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// apply. +func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { + return &ExternalMetricSourceApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. 
+func (b *ExternalMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ExternalMetricSourceApplyConfiguration { + b.Metric = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ExternalMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ExternalMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go new file mode 100644 index 000000000..3b1a0329b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// with apply. +type ExternalMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// apply. +func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { + return &ExternalMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *ExternalMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ExternalMetricStatusApplyConfiguration { + b.Metric = value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. 
+func (b *ExternalMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ExternalMetricStatusApplyConfiguration { + b.Current = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go new file mode 100644 index 000000000..af805488e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -0,0 +1,276 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// with apply. +type HorizontalPodAutoscalerApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"` + Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` +} + +// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// apply. +func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { + b := &HorizontalPodAutoscalerApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("HorizontalPodAutoscaler") + b.WithAPIVersion("autoscaling/v2") + return b +} + +// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from +// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a +// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API. +// ExtractHorizontalPodAutoscaler provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") +} + +// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") +} + +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { + b := &HorizontalPodAutoscalerApplyConfiguration{} + err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(horizontalPodAutoscaler.Name) + b.WithNamespace(horizontalPodAutoscaler.Namespace) + + b.WithKind("HorizontalPodAutoscaler") + b.WithAPIVersion("autoscaling/v2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithSpec(value *HorizontalPodAutoscalerSpecApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *HorizontalPodAutoscalerStatusApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go new file mode 100644 index 000000000..e6fdabd7c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// with apply. +type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { + ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` + ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` +} + +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// apply. +func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { + return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} +} + +// WithScaleUp sets the ScaleUp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleUp field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerBehaviorApplyConfiguration) WithScaleUp(value *HPAScalingRulesApplyConfiguration) *HorizontalPodAutoscalerBehaviorApplyConfiguration { + b.ScaleUp = value + return b +} + +// WithScaleDown sets the ScaleDown field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleDown field is set to the value of the last call. +func (b *HorizontalPodAutoscalerBehaviorApplyConfiguration) WithScaleDown(value *HPAScalingRulesApplyConfiguration) *HorizontalPodAutoscalerBehaviorApplyConfiguration { + b.ScaleDown = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go new file mode 100644 index 000000000..c020eccd3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -0,0 +1,81 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// with apply. +type HorizontalPodAutoscalerConditionApplyConfiguration struct { + Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// apply. +func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { + return &HorizontalPodAutoscalerConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithReason(value string) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithMessage(value string) *HorizontalPodAutoscalerConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go new file mode 100644 index 000000000..c36bc3f22 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// with apply. +type HorizontalPodAutoscalerSpecApplyConfiguration struct { + ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` + MinReplicas *int32 `json:"minReplicas,omitempty"` + MaxReplicas *int32 `json:"maxReplicas,omitempty"` + Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` + Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` +} + +// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// apply. 
+func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { + return &HorizontalPodAutoscalerSpecApplyConfiguration{} +} + +// WithScaleTargetRef sets the ScaleTargetRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScaleTargetRef field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithScaleTargetRef(value *CrossVersionObjectReferenceApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.ScaleTargetRef = value + return b +} + +// WithMinReplicas sets the MinReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMinReplicas(value int32) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.MinReplicas = &value + return b +} + +// WithMaxReplicas sets the MaxReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMaxReplicas(value int32) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.MaxReplicas = &value + return b +} + +// WithMetrics adds the given value to the Metrics field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Metrics field. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithMetrics(values ...*MetricSpecApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetrics") + } + b.Metrics = append(b.Metrics, *values[i]) + } + return b +} + +// WithBehavior sets the Behavior field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Behavior field is set to the value of the last call. +func (b *HorizontalPodAutoscalerSpecApplyConfiguration) WithBehavior(value *HorizontalPodAutoscalerBehaviorApplyConfiguration) *HorizontalPodAutoscalerSpecApplyConfiguration { + b.Behavior = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go new file mode 100644 index 000000000..d4d551df8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// with apply. +type HorizontalPodAutoscalerStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"` + CurrentReplicas *int32 `json:"currentReplicas,omitempty"` + DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` + CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"` + Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// apply. +func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { + return &HorizontalPodAutoscalerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithObservedGeneration(value int64) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithLastScaleTime sets the LastScaleTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastScaleTime field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value v1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.LastScaleTime = &value + return b +} + +// WithCurrentReplicas sets the CurrentReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithCurrentReplicas(value int32) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.CurrentReplicas = &value + return b +} + +// WithDesiredReplicas sets the DesiredReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DesiredReplicas field is set to the value of the last call. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithDesiredReplicas(value int32) *HorizontalPodAutoscalerStatusApplyConfiguration { + b.DesiredReplicas = &value + return b +} + +// WithCurrentMetrics adds the given value to the CurrentMetrics field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CurrentMetrics field. 
+func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithCurrentMetrics(values ...*MetricStatusApplyConfiguration) *HorizontalPodAutoscalerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCurrentMetrics") + } + b.CurrentMetrics = append(b.CurrentMetrics, *values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithConditions(values ...*HorizontalPodAutoscalerConditionApplyConfiguration) *HorizontalPodAutoscalerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go new file mode 100644 index 000000000..139f0fb5c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// with apply. +type HPAScalingPolicyApplyConfiguration struct { + Type *v2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` +} + +// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// apply. +func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { + return &HPAScalingPolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { + b.Type = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. 
+func (b *HPAScalingPolicyApplyConfiguration) WithValue(value int32) *HPAScalingPolicyApplyConfiguration { + b.Value = &value + return b +} + +// WithPeriodSeconds sets the PeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PeriodSeconds field is set to the value of the last call. +func (b *HPAScalingPolicyApplyConfiguration) WithPeriodSeconds(value int32) *HPAScalingPolicyApplyConfiguration { + b.PeriodSeconds = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go new file mode 100644 index 000000000..e768076aa --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// with apply. +type HPAScalingRulesApplyConfiguration struct { + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` + SelectPolicy *v2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` +} + +// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// apply. +func HPAScalingRules() *HPAScalingRulesApplyConfiguration { + return &HPAScalingRulesApplyConfiguration{} +} + +// WithStabilizationWindowSeconds sets the StabilizationWindowSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StabilizationWindowSeconds field is set to the value of the last call. +func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value int32) *HPAScalingRulesApplyConfiguration { + b.StabilizationWindowSeconds = &value + return b +} + +// WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectPolicy field is set to the value of the last call. +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { + b.SelectPolicy = &value + return b +} + +// WithPolicies adds the given value to the Policies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Policies field. 
+func (b *HPAScalingRulesApplyConfiguration) WithPolicies(values ...*HPAScalingPolicyApplyConfiguration) *HPAScalingRulesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPolicies") + } + b.Policies = append(b.Policies, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go new file mode 100644 index 000000000..312ad3ddd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// with apply. +type MetricIdentifierApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` +} + +// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// apply. +func MetricIdentifier() *MetricIdentifierApplyConfiguration { + return &MetricIdentifierApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MetricIdentifierApplyConfiguration) WithName(value string) *MetricIdentifierApplyConfiguration { + b.Name = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *MetricIdentifierApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *MetricIdentifierApplyConfiguration { + b.Selector = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go new file mode 100644 index 000000000..094ead6c1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// with apply. +type MetricSpecApplyConfiguration struct { + Type *v2.MetricSourceType `json:"type,omitempty"` + Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` + Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` + Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` + ContainerResource *ContainerResourceMetricSourceApplyConfiguration `json:"containerResource,omitempty"` + External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` +} + +// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// apply. +func MetricSpec() *MetricSpecApplyConfiguration { + return &MetricSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithType(value v2.MetricSourceType) *MetricSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithObject sets the Object field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Object field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithObject(value *ObjectMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Object = value + return b +} + +// WithPods sets the Pods field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pods field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithPods(value *PodsMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Pods = value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithResource(value *ResourceMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.Resource = value + return b +} + +// WithContainerResource sets the ContainerResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerResource field is set to the value of the last call. 
+func (b *MetricSpecApplyConfiguration) WithContainerResource(value *ContainerResourceMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.ContainerResource = value + return b +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *MetricSpecApplyConfiguration) WithExternal(value *ExternalMetricSourceApplyConfiguration) *MetricSpecApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go new file mode 100644 index 000000000..c65ad446f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" +) + +// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// with apply. +type MetricStatusApplyConfiguration struct { + Type *v2.MetricSourceType `json:"type,omitempty"` + Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` + Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` + Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` + ContainerResource *ContainerResourceMetricStatusApplyConfiguration `json:"containerResource,omitempty"` + External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` +} + +// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// apply. +func MetricStatus() *MetricStatusApplyConfiguration { + return &MetricStatusApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithType(value v2.MetricSourceType) *MetricStatusApplyConfiguration { + b.Type = &value + return b +} + +// WithObject sets the Object field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Object field is set to the value of the last call. 
+func (b *MetricStatusApplyConfiguration) WithObject(value *ObjectMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Object = value + return b +} + +// WithPods sets the Pods field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pods field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithPods(value *PodsMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Pods = value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithResource(value *ResourceMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.Resource = value + return b +} + +// WithContainerResource sets the ContainerResource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerResource field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithContainerResource(value *ContainerResourceMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.ContainerResource = value + return b +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *MetricStatusApplyConfiguration) WithExternal(value *ExternalMetricStatusApplyConfiguration) *MetricStatusApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go new file mode 100644 index 000000000..f301e4d2b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// with apply. 
+type MetricTargetApplyConfiguration struct { + Type *v2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` +} + +// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// apply. +func MetricTarget() *MetricTargetApplyConfiguration { + return &MetricTargetApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithType(value v2.MetricTargetType) *MetricTargetApplyConfiguration { + b.Type = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithValue(value resource.Quantity) *MetricTargetApplyConfiguration { + b.Value = &value + return b +} + +// WithAverageValue sets the AverageValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageValue field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithAverageValue(value resource.Quantity) *MetricTargetApplyConfiguration { + b.AverageValue = &value + return b +} + +// WithAverageUtilization sets the AverageUtilization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageUtilization field is set to the value of the last call. +func (b *MetricTargetApplyConfiguration) WithAverageUtilization(value int32) *MetricTargetApplyConfiguration { + b.AverageUtilization = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go new file mode 100644 index 000000000..e8474b189 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// with apply. 
+type MetricValueStatusApplyConfiguration struct { + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` +} + +// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// apply. +func MetricValueStatus() *MetricValueStatusApplyConfiguration { + return &MetricValueStatusApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *MetricValueStatusApplyConfiguration) WithValue(value resource.Quantity) *MetricValueStatusApplyConfiguration { + b.Value = &value + return b +} + +// WithAverageValue sets the AverageValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageValue field is set to the value of the last call. +func (b *MetricValueStatusApplyConfiguration) WithAverageValue(value resource.Quantity) *MetricValueStatusApplyConfiguration { + b.AverageValue = &value + return b +} + +// WithAverageUtilization sets the AverageUtilization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AverageUtilization field is set to the value of the last call. +func (b *MetricValueStatusApplyConfiguration) WithAverageUtilization(value int32) *MetricValueStatusApplyConfiguration { + b.AverageUtilization = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go new file mode 100644 index 000000000..a9482565e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// with apply. +type ObjectMetricSourceApplyConfiguration struct { + DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` +} + +// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// apply. 
+func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { + return &ObjectMetricSourceApplyConfiguration{} +} + +// WithDescribedObject sets the DescribedObject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DescribedObject field is set to the value of the last call. +func (b *ObjectMetricSourceApplyConfiguration) WithDescribedObject(value *CrossVersionObjectReferenceApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.DescribedObject = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ObjectMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.Target = value + return b +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *ObjectMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ObjectMetricSourceApplyConfiguration { + b.Metric = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go new file mode 100644 index 000000000..70ba43bed --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// with apply. +type ObjectMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` + DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` +} + +// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// apply. +func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { + return &ObjectMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. 
+func (b *ObjectMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.Metric = value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *ObjectMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.Current = value + return b +} + +// WithDescribedObject sets the DescribedObject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DescribedObject field is set to the value of the last call. +func (b *ObjectMetricStatusApplyConfiguration) WithDescribedObject(value *CrossVersionObjectReferenceApplyConfiguration) *ObjectMetricStatusApplyConfiguration { + b.DescribedObject = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go new file mode 100644 index 000000000..86601cc48 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use +// with apply. +type PodResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with +// apply. +func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { + return &PodResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. 
+func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go new file mode 100644 index 000000000..0a7a5c259 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// with apply. +type PodsMetricSourceApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// apply. +func PodsMetricSource() *PodsMetricSourceApplyConfiguration { + return &PodsMetricSourceApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *PodsMetricSourceApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *PodsMetricSourceApplyConfiguration { + b.Metric = value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *PodsMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodsMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go new file mode 100644 index 000000000..865fcc33e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// with apply. +type PodsMetricStatusApplyConfiguration struct { + Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// apply. +func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { + return &PodsMetricStatusApplyConfiguration{} +} + +// WithMetric sets the Metric field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Metric field is set to the value of the last call. +func (b *PodsMetricStatusApplyConfiguration) WithMetric(value *MetricIdentifierApplyConfiguration) *PodsMetricStatusApplyConfiguration { + b.Metric = value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *PodsMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *PodsMetricStatusApplyConfiguration { + b.Current = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go new file mode 100644 index 000000000..25a065fef --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// with apply. +type ResourceMetricSourceApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Target *MetricTargetApplyConfiguration `json:"target,omitempty"` +} + +// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// apply. +func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { + return &ResourceMetricSourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *ResourceMetricSourceApplyConfiguration { + b.Name = &value + return b +} + +// WithTarget sets the Target field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Target field is set to the value of the last call. +func (b *ResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *ResourceMetricSourceApplyConfiguration { + b.Target = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go new file mode 100644 index 000000000..fb5625afa --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// with apply. +type ResourceMetricStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` +} + +// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// apply. +func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { + return &ResourceMetricStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceMetricStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceMetricStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. 
+func (b *ResourceMetricStatusApplyConfiguration) WithCurrent(value *MetricValueStatusApplyConfiguration) *ResourceMetricStatusApplyConfiguration { + b.Current = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index ba7e27e08..a36d5d0ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -33,6 +33,7 @@ type JobStatusApplyConfiguration struct { Failed *int32 `json:"failed,omitempty"` CompletedIndexes *string `json:"completedIndexes,omitempty"` UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"` + Ready *int32 `json:"ready,omitempty"` } // JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with @@ -109,3 +110,11 @@ func (b *JobStatusApplyConfiguration) WithUncountedTerminatedPods(value *Uncount b.UncountedTerminatedPods = value return b } + +// WithReady sets the Ready field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ready field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithReady(value int32) *JobStatusApplyConfiguration { + b.Ready = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go new file mode 100644 index 000000000..f94e55937 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// with apply. +type GRPCActionApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Service *string `json:"service,omitempty"` +} + +// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// apply. +func GRPCAction() *GRPCActionApplyConfiguration { + return &GRPCActionApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *GRPCActionApplyConfiguration) WithPort(value int32) *GRPCActionApplyConfiguration { + b.Port = &value + return b +} + +// WithService sets the Service field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Service field is set to the value of the last call. +func (b *GRPCActionApplyConfiguration) WithService(value string) *GRPCActionApplyConfiguration { + b.Service = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index ab37b6677..db9abf8af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -21,8 +21,8 @@ package v1 // LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use // with apply. type LifecycleApplyConfiguration struct { - PostStart *HandlerApplyConfiguration `json:"postStart,omitempty"` - PreStop *HandlerApplyConfiguration `json:"preStop,omitempty"` + PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` + PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } // LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with @@ -34,7 +34,7 @@ func Lifecycle() *LifecycleApplyConfiguration { // WithPostStart sets the PostStart field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PostStart field is set to the value of the last call. -func (b *LifecycleApplyConfiguration) WithPostStart(value *HandlerApplyConfiguration) *LifecycleApplyConfiguration { +func (b *LifecycleApplyConfiguration) WithPostStart(value *LifecycleHandlerApplyConfiguration) *LifecycleApplyConfiguration { b.PostStart = value return b } @@ -42,7 +42,7 @@ func (b *LifecycleApplyConfiguration) WithPostStart(value *HandlerApplyConfigura // WithPreStop sets the PreStop field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PreStop field is set to the value of the last call. -func (b *LifecycleApplyConfiguration) WithPreStop(value *HandlerApplyConfiguration) *LifecycleApplyConfiguration { +func (b *LifecycleApplyConfiguration) WithPreStop(value *LifecycleHandlerApplyConfiguration) *LifecycleApplyConfiguration { b.PreStop = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go similarity index 69% rename from vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index fbf1511cc..6e373dd4e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/handler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,24 +18,24 @@ limitations under the License. package v1 -// HandlerApplyConfiguration represents an declarative configuration of the Handler type for use +// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use // with apply. 
-type HandlerApplyConfiguration struct { +type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` } -// HandlerApplyConfiguration constructs an declarative configuration of the Handler type for use with +// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with // apply. -func Handler() *HandlerApplyConfiguration { - return &HandlerApplyConfiguration{} +func LifecycleHandler() *LifecycleHandlerApplyConfiguration { + return &LifecycleHandlerApplyConfiguration{} } // WithExec sets the Exec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Exec field is set to the value of the last call. -func (b *HandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.Exec = value return b } @@ -43,7 +43,7 @@ func (b *HandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration // WithHTTPGet sets the HTTPGet field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTPGet field is set to the value of the last call. -func (b *HandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.HTTPGet = value return b } @@ -51,7 +51,7 @@ func (b *HandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfigu // WithTCPSocket sets the TCPSocket field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TCPSocket field is set to the value of the last call. -func (b *HandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *HandlerApplyConfiguration { +func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { b.TCPSocket = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 711651e0b..4c38d89f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,10 +25,12 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. 
type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + ResizeStatus *v1.PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -75,3 +77,19 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithConditions(values .. } return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResizeStatus sets the ResizeStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResizeStatus field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithResizeStatus(value v1.PersistentVolumeClaimResizeStatus) *PersistentVolumeClaimStatusApplyConfiguration { + b.ResizeStatus = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go new file mode 100644 index 000000000..a5315d636 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// with apply. +type PodOSApplyConfiguration struct { + Name *v1.OSName `json:"name,omitempty"` +} + +// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// apply. 
+func PodOS() *PodOSApplyConfiguration { + return &PodOSApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodOSApplyConfiguration) WithName(value v1.OSName) *PodOSApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index d1c9ea9cb..015859e9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -60,6 +60,7 @@ type PodSpecApplyConfiguration struct { Overhead *corev1.ResourceList `json:"overhead,omitempty"` TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"` SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"` + OS *PodOSApplyConfiguration `json:"os,omitempty"` } // PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with @@ -398,3 +399,11 @@ func (b *PodSpecApplyConfiguration) WithSetHostnameAsFQDN(value bool) *PodSpecAp b.SetHostnameAsFQDN = &value return b } + +// WithOS sets the OS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OS field is set to the value of the last call. +func (b *PodSpecApplyConfiguration) WithOS(value *PodOSApplyConfiguration) *PodSpecApplyConfiguration { + b.OS = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index f87adcd5f..10730557a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -21,13 +21,13 @@ package v1 // ProbeApplyConfiguration represents an declarative configuration of the Probe type for use // with apply. type ProbeApplyConfiguration struct { - HandlerApplyConfiguration `json:",inline"` - InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"` - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` - SuccessThreshold *int32 `json:"successThreshold,omitempty"` - FailureThreshold *int32 `json:"failureThreshold,omitempty"` - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + ProbeHandlerApplyConfiguration `json:",inline"` + InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"` + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + SuccessThreshold *int32 `json:"successThreshold,omitempty"` + FailureThreshold *int32 `json:"failureThreshold,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } // ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with @@ -60,6 +60,14 @@ func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfi return b } +// WithGRPC sets the GRPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GRPC field is set to the value of the last call. +func (b *ProbeApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeApplyConfiguration { + b.GRPC = value + return b +} + // WithInitialDelaySeconds sets the InitialDelaySeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InitialDelaySeconds field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go new file mode 100644 index 000000000..54f3344ac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// with apply. +type ProbeHandlerApplyConfiguration struct { + Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` + HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` + TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` +} + +// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// apply. +func ProbeHandler() *ProbeHandlerApplyConfiguration { + return &ProbeHandlerApplyConfiguration{} +} + +// WithExec sets the Exec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exec field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.Exec = value + return b +} + +// WithHTTPGet sets the HTTPGet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPGet field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.HTTPGet = value + return b +} + +// WithTCPSocket sets the TCPSocket field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TCPSocket field is set to the value of the last call. 
+func (b *ProbeHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.TCPSocket = value + return b +} + +// WithGRPC sets the GRPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GRPC field is set to the value of the last call. +func (b *ProbeHandlerApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeHandlerApplyConfiguration { + b.GRPC = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go new file mode 100644 index 000000000..924f966d4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// with apply. +type FlowDistinguisherMethodApplyConfiguration struct { + Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` +} + +// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// apply. +func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { + return &FlowDistinguisherMethodApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 000000000..323d7241d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// with apply. +type FlowSchemaApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` +} + +// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// apply. +func FlowSchema(name string) *FlowSchemaApplyConfiguration { + b := &FlowSchemaApplyConfiguration{} + b.WithName(name) + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b +} + +// ExtractFlowSchema extracts the applied configuration owned by fieldManager from +// flowSchema. If no managedFields are found in flowSchema for fieldManager, a +// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. +// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") +} + +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") +} + +func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { + b := &FlowSchemaApplyConfiguration{} + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.FlowSchema"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(flowSchema.Name) + + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithSelfLink(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Generation field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithClusterName(value string) *FlowSchemaApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithSpec(value *FlowSchemaSpecApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go new file mode 100644 index 000000000..04dfcbf11 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. 
DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// with apply. +type FlowSchemaConditionApplyConfiguration struct { + Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// apply. +func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { + return &FlowSchemaConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithReason(value string) *FlowSchemaConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithMessage(value string) *FlowSchemaConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go new file mode 100644 index 000000000..a5477e276 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// with apply. +type FlowSchemaSpecApplyConfiguration struct { + PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` + MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"` + DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"` + Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` +} + +// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// apply. +func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { + return &FlowSchemaSpecApplyConfiguration{} +} + +// WithPriorityLevelConfiguration sets the PriorityLevelConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityLevelConfiguration field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithPriorityLevelConfiguration(value *PriorityLevelConfigurationReferenceApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.PriorityLevelConfiguration = value + return b +} + +// WithMatchingPrecedence sets the MatchingPrecedence field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchingPrecedence field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithMatchingPrecedence(value int32) *FlowSchemaSpecApplyConfiguration { + b.MatchingPrecedence = &value + return b +} + +// WithDistinguisherMethod sets the DistinguisherMethod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DistinguisherMethod field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithDistinguisherMethod(value *FlowDistinguisherMethodApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.DistinguisherMethod = value + return b +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. 
+func (b *FlowSchemaSpecApplyConfiguration) WithRules(values ...*PolicyRulesWithSubjectsApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go new file mode 100644 index 000000000..67c5be2cb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// with apply. +type FlowSchemaStatusApplyConfiguration struct { + Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// apply. +func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { + return &FlowSchemaStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *FlowSchemaStatusApplyConfiguration) WithConditions(values ...*FlowSchemaConditionApplyConfiguration) *FlowSchemaStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go new file mode 100644 index 000000000..b670f2cfd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta2 + +// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// with apply. +type GroupSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// apply. +func GroupSubject() *GroupSubjectApplyConfiguration { + return &GroupSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GroupSubjectApplyConfiguration) WithName(value string) *GroupSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go new file mode 100644 index 000000000..e25f7f6b0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// with apply. +type LimitedPriorityLevelConfigurationApplyConfiguration struct { + AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` + LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` +} + +// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// apply. +func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { + return &LimitedPriorityLevelConfigurationApplyConfiguration{} +} + +// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.AssuredConcurrencyShares = &value + return b +} + +// WithLimitResponse sets the LimitResponse field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LimitResponse field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse(value *LimitResponseApplyConfiguration) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LimitResponse = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go new file mode 100644 index 000000000..a9b7661fb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// with apply. +type LimitResponseApplyConfiguration struct { + Type *v1beta2.LimitResponseType `json:"type,omitempty"` + Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` +} + +// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// apply. +func LimitResponse() *LimitResponseApplyConfiguration { + return &LimitResponseApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithType(value v1beta2.LimitResponseType) *LimitResponseApplyConfiguration { + b.Type = &value + return b +} + +// WithQueuing sets the Queuing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queuing field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithQueuing(value *QueuingConfigurationApplyConfiguration) *LimitResponseApplyConfiguration { + b.Queuing = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go new file mode 100644 index 000000000..cb8ba0afd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// with apply. +type NonResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + NonResourceURLs []string `json:"nonResourceURLs,omitempty"` +} + +// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// apply. +func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { + return &NonResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithNonResourceURLs adds the given value to the NonResourceURLs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceURLs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithNonResourceURLs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.NonResourceURLs = append(b.NonResourceURLs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go new file mode 100644 index 000000000..179c3979d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// with apply. +type PolicyRulesWithSubjectsApplyConfiguration struct { + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"` + NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` +} + +// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// apply. 
+func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { + return &PolicyRulesWithSubjectsApplyConfiguration{} +} + +// WithSubjects adds the given value to the Subjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subjects field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithSubjects(values ...*SubjectApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubjects") + } + b.Subjects = append(b.Subjects, *values[i]) + } + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithResourceRules(values ...*ResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithNonResourceRules adds the given value to the NonResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithNonResourceRules(values ...*NonResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNonResourceRules") + } + b.NonResourceRules = append(b.NonResourceRules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 000000000..4ac11bba6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// with apply. +type PriorityLevelConfigurationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` +} + +// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// apply. +func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { + b := &PriorityLevelConfigurationApplyConfiguration{} + b.WithName(name) + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b +} + +// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from +// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a +// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. +// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") +} + +// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") +} + +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { + b := &PriorityLevelConfigurationApplyConfiguration{} + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(priorityLevelConfiguration.Name) + + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSelfLink(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.SelfLink = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +// WithClusterName sets the ClusterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterName field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithClusterName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ClusterName = &value + return b +} + +func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSpec(value *PriorityLevelConfigurationSpecApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *PriorityLevelConfigurationStatusApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go new file mode 100644 index 000000000..f742adeff --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// with apply. +type PriorityLevelConfigurationConditionApplyConfiguration struct { + Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// apply. 
+func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { + return &PriorityLevelConfigurationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithReason(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithMessage(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go new file mode 100644 index 000000000..581b451ff --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// with apply. +type PriorityLevelConfigurationReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// apply. +func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { + return &PriorityLevelConfigurationReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationReferenceApplyConfiguration) WithName(value string) *PriorityLevelConfigurationReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go new file mode 100644 index 000000000..5560ed9e5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// with apply. +type PriorityLevelConfigurationSpecApplyConfiguration struct { + Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` + Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` +} + +// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// apply. +func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { + return &PriorityLevelConfigurationSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithLimited sets the Limited field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limited field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *LimitedPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Limited = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go new file mode 100644 index 000000000..b55e32be0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// with apply. +type PriorityLevelConfigurationStatusApplyConfiguration struct { + Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// apply. +func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { + return &PriorityLevelConfigurationStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *PriorityLevelConfigurationStatusApplyConfiguration) WithConditions(values ...*PriorityLevelConfigurationConditionApplyConfiguration) *PriorityLevelConfigurationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go new file mode 100644 index 000000000..06246fb27 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// with apply. +type QueuingConfigurationApplyConfiguration struct { + Queues *int32 `json:"queues,omitempty"` + HandSize *int32 `json:"handSize,omitempty"` + QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` +} + +// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// apply. +func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { + return &QueuingConfigurationApplyConfiguration{} +} + +// WithQueues sets the Queues field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queues field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueues(value int32) *QueuingConfigurationApplyConfiguration { + b.Queues = &value + return b +} + +// WithHandSize sets the HandSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HandSize field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithHandSize(value int32) *QueuingConfigurationApplyConfiguration { + b.HandSize = &value + return b +} + +// WithQueueLengthLimit sets the QueueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueueLengthLimit field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueueLengthLimit(value int32) *QueuingConfigurationApplyConfiguration { + b.QueueLengthLimit = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go new file mode 100644 index 000000000..b67ea1c7f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -0,0 +1,83 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. 
DO NOT EDIT. + +package v1beta2 + +// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// with apply. +type ResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + Resources []string `json:"resources,omitempty"` + ClusterScope *bool `json:"clusterScope,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` +} + +// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// apply. +func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { + return &ResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *ResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *ResourcePolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *ResourcePolicyRuleApplyConfiguration) WithResources(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithClusterScope sets the ClusterScope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterScope field is set to the value of the last call. +func (b *ResourcePolicyRuleApplyConfiguration) WithClusterScope(value bool) *ResourcePolicyRuleApplyConfiguration { + b.ClusterScope = &value + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. 
+func (b *ResourcePolicyRuleApplyConfiguration) WithNamespaces(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go new file mode 100644 index 000000000..b6cfdcad3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// with apply. +type ServiceAccountSubjectApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// apply. +func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { + return &ServiceAccountSubjectApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithNamespace(value string) *ServiceAccountSubjectApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithName(value string) *ServiceAccountSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go new file mode 100644 index 000000000..7030785b8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" +) + +// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// with apply. +type SubjectApplyConfiguration struct { + Kind *v1beta2.SubjectKind `json:"kind,omitempty"` + User *UserSubjectApplyConfiguration `json:"user,omitempty"` + Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` + ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` +} + +// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// apply. +func Subject() *SubjectApplyConfiguration { + return &SubjectApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithKind(value v1beta2.SubjectKind) *SubjectApplyConfiguration { + b.Kind = &value + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithUser(value *UserSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.User = value + return b +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithGroup(value *GroupSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.Group = value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithServiceAccount(value *ServiceAccountSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.ServiceAccount = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go new file mode 100644 index 000000000..8c77b3e8a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. 
DO NOT EDIT. + +package v1beta2 + +// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// with apply. +type UserSubjectApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// apply. +func UserSubject() *UserSubjectApplyConfiguration { + return &UserSubjectApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *UserSubjectApplyConfiguration) WithName(value string) *UserSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 5960666fd..824c5e958 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -482,8 +482,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus map: fields: @@ -907,12 +907,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1.StatefulSetSpec map: fields: - name: minReadySeconds type: scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -949,6 +961,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: availableReplicas type: scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1194,12 +1207,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1beta1.StatefulSetSpec map: fields: - name: minReadySeconds type: scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -1236,6 +1261,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: availableReplicas type: scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1679,12 +1705,24 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted + type: + scalar: string + - name: whenScaled + type: + scalar: string - name: io.k8s.api.apps.v1beta2.StatefulSetSpec map: fields: - name: minReadySeconds type: scalar: numeric + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy - name: 
podManagementPolicy type: scalar: string @@ -1721,6 +1759,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: availableReplicas type: scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1836,6 +1875,362 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource + map: + fields: + - name: container + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus + map: + fields: + - name: container + type: + scalar: string + default: "" + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.ExternalMetricSource + map: + fields: + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ExternalMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.HPAScalingPolicy + map: + fields: + - name: periodSeconds + type: + scalar: numeric + default: 0 + - name: type + type: + scalar: string + default: "" + - name: value + type: + scalar: numeric + default: 0 +- name: io.k8s.api.autoscaling.v2.HPAScalingRules + map: + fields: + - name: policies + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.HPAScalingPolicy + elementRelationship: atomic + - name: selectPolicy + type: + scalar: string + - name: stabilizationWindowSeconds + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec + default: {} + - name: status + type: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus + default: {} +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior + map: + fields: + - name: scaleDown + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules + - name: scaleUp + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec + map: + fields: + - name: behavior + type: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior + - name: maxReplicas + type: + scalar: numeric 
+ default: 0 + - name: metrics + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.MetricSpec + elementRelationship: atomic + - name: minReplicas + type: + scalar: numeric + - name: scaleTargetRef + type: + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition + elementRelationship: associative + keys: + - type + - name: currentMetrics + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.MetricStatus + elementRelationship: atomic + - name: currentReplicas + type: + scalar: numeric + - name: desiredReplicas + type: + scalar: numeric + default: 0 + - name: lastScaleTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: observedGeneration + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v2.MetricIdentifier + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2.MetricSpec + map: + fields: + - name: containerResource + type: + namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource + - name: external + type: + namedType: io.k8s.api.autoscaling.v2.ExternalMetricSource + - name: object + type: + namedType: io.k8s.api.autoscaling.v2.ObjectMetricSource + - name: pods + type: + namedType: io.k8s.api.autoscaling.v2.PodsMetricSource + - name: resource + type: + namedType: io.k8s.api.autoscaling.v2.ResourceMetricSource + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.MetricStatus + map: + fields: + - name: containerResource + type: + namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus + - name: external + type: + namedType: io.k8s.api.autoscaling.v2.ExternalMetricStatus + - name: object + type: + namedType: io.k8s.api.autoscaling.v2.ObjectMetricStatus + - name: pods + type: + namedType: io.k8s.api.autoscaling.v2.PodsMetricStatus + - name: resource + type: + namedType: io.k8s.api.autoscaling.v2.ResourceMetricStatus + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.MetricTarget + map: + fields: + - name: averageUtilization + type: + scalar: numeric + - name: averageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: type + type: + scalar: string + default: "" + - name: value + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2.MetricValueStatus + map: + fields: + - name: averageUtilization + type: + scalar: numeric + - name: averageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: value + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2.ObjectMetricSource + map: + fields: + - name: describedObject + type: + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ObjectMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: describedObject + type: + namedType: 
io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.PodsMetricSource + map: + fields: + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.PodsMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricSource + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource map: fields: @@ -2668,6 +3063,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: failed type: scalar: numeric + - name: ready + type: + scalar: numeric - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -3875,7 +4273,10 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: namedType: io.k8s.api.core.v1.ContainerPort - elementRelationship: atomic + elementRelationship: associative + keys: + - containerPort + - protocol - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe @@ -4113,6 +4514,17 @@ var schemaYAML = typed.YAMLObject(`types: - name: readOnly type: scalar: boolean +- name: io.k8s.api.core.v1.GRPCAction + map: + fields: + - name: port + type: + scalar: numeric + default: 0 + - name: service + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.GitRepoVolumeSource map: fields: @@ -4190,18 +4602,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.Handler - map: - fields: - - name: exec - type: - namedType: io.k8s.api.core.v1.ExecAction - - name: httpGet - type: - namedType: io.k8s.api.core.v1.HTTPGetAction - - name: tcpSocket - type: - namedType: io.k8s.api.core.v1.TCPSocketAction - name: io.k8s.api.core.v1.HostAlias map: fields: @@ -4327,10 +4727,22 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: postStart type: - namedType: io.k8s.api.core.v1.Handler + namedType: io.k8s.api.core.v1.LifecycleHandler - name: preStop type: - namedType: io.k8s.api.core.v1.Handler + namedType: io.k8s.api.core.v1.LifecycleHandler +- name: io.k8s.api.core.v1.LifecycleHandler + map: + fields: + - name: exec + type: + namedType: io.k8s.api.core.v1.ExecAction + - name: httpGet + type: + namedType: io.k8s.api.core.v1.HTTPGetAction + - name: tcpSocket + type: + namedType: io.k8s.api.core.v1.TCPSocketAction - name: io.k8s.api.core.v1.LimitRange map: fields: @@ -4926,6 +5338,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: capacity type: map: @@ -4942,6 +5359,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: phase type: scalar: string + - name: resizeStatus + type: + scalar: string - name: 
io.k8s.api.core.v1.PersistentVolumeClaimTemplate map: fields: @@ -5217,6 +5637,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string +- name: io.k8s.api.core.v1.PodOS + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.PodReadinessGate map: fields: @@ -5345,6 +5772,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: os + type: + namedType: io.k8s.api.core.v1.PodOS - name: overhead type: map: @@ -5554,6 +5984,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: failureThreshold type: scalar: numeric + - name: grpc + type: + namedType: io.k8s.api.core.v1.GRPCAction - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction @@ -8395,6 +8828,314 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod + map: + fields: + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec + default: {} + - name: status + type: + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus + default: {} +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + - name: type + type: + scalar: string +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec + map: + fields: + - name: distinguisherMethod + type: + namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod + - name: matchingPrecedence + type: + scalar: numeric + default: 0 + - name: priorityLevelConfiguration + type: + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference + default: {} + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects + elementRelationship: atomic +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse + map: + fields: + - name: queuing + type: + namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: queuing + discriminatorValue: Queuing +- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration + map: + fields: + - name: assuredConcurrencyShares + type: + scalar: numeric + default: 0 + - name: limitResponse + type: + namedType: io.k8s.api.flowcontrol.v1beta2.LimitResponse + default: {} +- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + map: + fields: + - name: nonResourceURLs + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: verbs + type: + list: + elementType: + scalar: string + 
elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects + map: + fields: + - name: nonResourceRules + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + elementRelationship: atomic + - name: resourceRules + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule + elementRelationship: atomic + - name: subjects + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.Subject + elementRelationship: atomic +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec + default: {} + - name: status + type: + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus + default: {} +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + - name: type + type: + scalar: string +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec + map: + fields: + - name: limited + type: + namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: limited + discriminatorValue: Limited +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration + map: + fields: + - name: handSize + type: + scalar: numeric + default: 0 + - name: queueLengthLimit + type: + scalar: numeric + default: 0 + - name: queues + type: + scalar: numeric + default: 0 +- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule + map: + fields: + - name: apiGroups + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: clusterScope + type: + scalar: boolean + - name: namespaces + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: verbs + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1beta2.Subject + map: + fields: + - name: group + type: + namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject + - name: kind + type: + scalar: string + default: "" + - name: serviceAccount + type: + namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject + - name: user + type: + namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject + unions: + - 
discriminator: kind + fields: + - fieldName: group + discriminatorValue: Group + - fieldName: serviceAccount + discriminatorValue: ServiceAccount + - fieldName: user + discriminatorValue: User +- name: io.k8s.api.flowcontrol.v1beta2.UserSubject + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.imagepolicy.v1alpha1.ImageReview map: fields: @@ -10647,8 +11388,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector map: fields: @@ -10821,8 +11562,8 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic map: elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.apimachinery.pkg.util.intstr.IntOrString scalar: untyped - name: __untyped_atomic_ diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go new file mode 100644 index 000000000..8a58d9e87 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -0,0 +1,137 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/client-go/discovery" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// openAPISchemaTTL is how frequently we need to check +// whether the open API schema has changed or not. +const openAPISchemaTTL = time.Minute + +// UnstructuredExtractor enables extracting the applied configuration state from object for fieldManager into an +// unstructured object type. +type UnstructuredExtractor interface { + Extract(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) + ExtractStatus(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) +} + +// gvkParserCache caches the GVKParser in order to prevent from having to repeatedly +// parse the models from the open API schema when the schema itself changes infrequently. +type gvkParserCache struct { + // discoveryClient is the client for retrieving the openAPI document and checking + // whether the document has changed recently + discoveryClient discovery.DiscoveryInterface + // mu protects the gvkParser + mu sync.Mutex + // gvkParser retrieves the objectType for a given gvk + gvkParser *managedfields.GvkParser + // lastChecked is the last time we checked if the openAPI doc has changed. + lastChecked time.Time +} + +// regenerateGVKParser builds the parser from the raw OpenAPI schema. 
+func regenerateGVKParser(dc discovery.DiscoveryInterface) (*managedfields.GvkParser, error) { + doc, err := dc.OpenAPISchema() + if err != nil { + return nil, err + } + + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + return managedfields.NewGVKParser(models, false) +} + +// objectTypeForGVK retrieves the typed.ParseableType for a given gvk from the cache +func (c *gvkParserCache) objectTypeForGVK(gvk schema.GroupVersionKind) (*typed.ParseableType, error) { + c.mu.Lock() + defer c.mu.Unlock() + // if the ttl on the openAPISchema has expired, + // regenerate the gvk parser + if time.Since(c.lastChecked) > openAPISchemaTTL { + c.lastChecked = time.Now() + parser, err := regenerateGVKParser(c.discoveryClient) + if err != nil { + return nil, err + } + c.gvkParser = parser + } + return c.gvkParser.Type(gvk), nil +} + +type extractor struct { + cache *gvkParserCache +} + +// NewUnstructuredExtractor creates the extractor with which you can extract the applied configuration +// for a given manager from an unstructured object. +func NewUnstructuredExtractor(dc discovery.DiscoveryInterface) (UnstructuredExtractor, error) { + parser, err := regenerateGVKParser(dc) + if err != nil { + return nil, fmt.Errorf("failed generating initial GVK Parser: %v", err) + } + return &extractor{ + cache: &gvkParserCache{ + gvkParser: parser, + discoveryClient: dc, + }, + }, nil +} + +// Extract extracts the applied configuration owned by fieldManager from an unstructured object. +// Note that the apply configuration itself is also an unstructured object. +func (e *extractor) Extract(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) { + return e.extractUnstructured(object, fieldManager, "") +} + +// ExtractStatus is the same as ExtractUnstructured except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func (e *extractor) ExtractStatus(object *unstructured.Unstructured, fieldManager string) (*unstructured.Unstructured, error) { + return e.extractUnstructured(object, fieldManager, "status") +} + +func (e *extractor) extractUnstructured(object *unstructured.Unstructured, fieldManager string, subresource string) (*unstructured.Unstructured, error) { + gvk := object.GetObjectKind().GroupVersionKind() + objectType, err := e.cache.objectTypeForGVK(gvk) + if err != nil { + return nil, fmt.Errorf("failed to fetch the objectType: %v", err) + } + result := &unstructured.Unstructured{} + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) + if err != nil { + return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) + } + result.SetName(object.GetName()) + result.SetNamespace(object.GetNamespace()) + result.SetKind(object.GetKind()) + result.SetAPIVersion(object.GetAPIVersion()) + return result, nil +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 87de32970..50e59c5d8 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,13 +20,14 @@ import ( "context" "encoding/json" "fmt" + "net/http" "net/url" "sort" "strings" "sync" "time" - //lint:ignore SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs + //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" openapi_v2 "github.com/googleapis/gnostic/openapiv2" @@ -482,12 +483,29 @@ func setDiscoveryDefaults(config *restclient.Config) error { // NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client // can be used to discover supported resources in the API server. +// NewDiscoveryClientForConfig is equivalent to NewDiscoveryClientForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { config := *c if err := setDiscoveryDefaults(&config); err != nil { return nil, err } - client, err := restclient.UnversionedRESTClientFor(&config) + httpClient, err := restclient.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewDiscoveryClientForConfigAndClient(&config, httpClient) +} + +// NewDiscoveryClientForConfigAndClient creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +// Note the http client provided takes precedence over the configured transport values. 
+func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http.Client) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient) return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err } diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index f0cc2dbf4..d3835c9fa 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -18,9 +18,11 @@ package fake import ( "fmt" + "net/http" openapi_v2 "github.com/googleapis/gnostic/openapiv2" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/version" @@ -49,7 +51,13 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me return resourceList, nil } } - return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) + return nil, &errors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotFound, + Reason: metav1.StatusReasonNotFound, + Message: fmt.Sprintf("the server could not find the requested resource, GroupVersion %q not found", groupVersion), + }} } // ServerResources returns the supported resources for all groups and versions. diff --git a/vendor/k8s.io/client-go/informers/autoscaling/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/interface.go index 81e839014..2b3b2d0e5 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/interface.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/interface.go @@ -20,6 +20,7 @@ package autoscaling import ( v1 "k8s.io/client-go/informers/autoscaling/v1" + v2 "k8s.io/client-go/informers/autoscaling/v2" v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1" v2beta2 "k8s.io/client-go/informers/autoscaling/v2beta2" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -29,6 +30,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V2 provides access to shared informers for resources in V2. + V2() v2.Interface // V2beta1 provides access to shared informers for resources in V2beta1. V2beta1() v2beta1.Interface // V2beta2 provides access to shared informers for resources in V2beta2. @@ -51,6 +54,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V2 returns a new v2.Interface. +func (g *group) V2() v2.Interface { + return v2.New(g.factory, g.namespace, g.tweakListOptions) +} + // V2beta1 returns a new v2beta1.Interface. func (g *group) V2beta1() v2beta1.Interface { return v2beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go new file mode 100644 index 000000000..5ddb3b015 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v2 + +import ( + "context" + time "time" + + autoscalingv2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v2 "k8s.io/client-go/listers/autoscaling/v2" + cache "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for +// HorizontalPodAutoscalers. +type HorizontalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2.HorizontalPodAutoscalerLister +} + +type horizontalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
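As the generated comments note, this informer is normally obtained through the shared informer factory rather than constructed directly. A rough sketch of that pattern for the new autoscaling/v2 group, assuming clientset is an existing kubernetes.Interface and the namespace and resync period are arbitrary:

// Sketch only; clientset and stopCh are assumed to exist.
import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func listHPANames(clientset kubernetes.Interface, stopCh <-chan struct{}) ([]string, error) {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	// The new autoscaling/v2 informers are reachable via Autoscaling().V2().
	hpaInformer := factory.Autoscaling().V2().HorizontalPodAutoscalers()
	lister := hpaInformer.Lister() // registers the informer with the factory
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	hpas, err := lister.HorizontalPodAutoscalers("default").List(labels.Everything())
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(hpas))
	for _, hpa := range hpas {
		names = append(names, hpa.Name)
	}
	return names, nil
}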
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) + }, + }, + &autoscalingv2.HorizontalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer) +} + +func (f *horizontalPodAutoscalerInformer) Lister() v2.HorizontalPodAutoscalerLister { + return v2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go new file mode 100644 index 000000000..2c71908e4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. + HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
+func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index b04ca59d3..828a7bc8b 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -21,6 +21,7 @@ package flowcontrol import ( v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" + v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -30,6 +31,8 @@ type Interface interface { V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface + // V1beta2 provides access to shared informers for resources in V1beta2. + V1beta2() v1beta2.Interface } type group struct { @@ -52,3 +55,8 @@ func (g *group) V1alpha1() v1alpha1.Interface { func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta2 returns a new v1beta2.Interface. +func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 000000000..6f6abecea --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + time "time" + + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + cache "k8s.io/client-go/tools/cache" +) + +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. +type FlowSchemaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.FlowSchemaLister +} + +type flowSchemaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
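The flowcontrol v1beta2 informers added below follow the same factory pattern; a hedged sketch of watching FlowSchemas through the shared factory, with an invented handler body purely for illustration:

// Sketch only; clientset and stopCh are assumed to exist.
import (
	"time"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

func watchFlowSchemas(clientset kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	informer := factory.Flowcontrol().V1beta2().FlowSchemas().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fs := obj.(*flowcontrolv1beta2.FlowSchema)
			klog.Infof("observed FlowSchema %s", fs.Name) // illustrative handler
		},
	})
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	<-stopCh
}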
+func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta2().FlowSchemas().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta2().FlowSchemas().Watch(context.TODO(), options) + }, + }, + &flowcontrolv1beta2.FlowSchema{}, + resyncPeriod, + indexers, + ) +} + +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta2.FlowSchema{}, f.defaultInformer) +} + +func (f *flowSchemaInformer) Lister() v1beta2.FlowSchemaLister { + return v1beta2.NewFlowSchemaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go new file mode 100644 index 000000000..142d55289 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. + FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. +func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 000000000..306a90185 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + time "time" + + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. +type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta2().PriorityLevelConfigurations().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1beta2().PriorityLevelConfigurations().Watch(context.TODO(), options) + }, + }, + &flowcontrolv1beta2.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1beta2.PriorityLevelConfigurationLister { + return v1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index aede51a5e..5b94a2d2a 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -28,6 +28,7 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" autoscalingv1 "k8s.io/api/autoscaling/v1" + v2 "k8s.io/api/autoscaling/v2" v2beta1 "k8s.io/api/autoscaling/v2beta1" v2beta2 "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" @@ -44,6 +45,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" @@ -138,6 +140,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil + // Group=autoscaling, Version=v2 + case v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2().HorizontalPodAutoscalers().Informer()}, nil + // Group=autoscaling, Version=v2beta1 case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil @@ -248,6 +254,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case flowcontrolv1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().PriorityLevelConfigurations().Informer()}, nil + // 
Group=flowcontrol.apiserver.k8s.io, Version=v1beta2 + case flowcontrolv1beta2.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().FlowSchemas().Informer()}, nil + case flowcontrolv1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().PriorityLevelConfigurations().Informer()}, nil + // Group=internal.apiserver.k8s.io, Version=v1alpha1 case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 55a236fac..3e512a7c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,6 +20,7 @@ package kubernetes import ( "fmt" + "net/http" discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" @@ -33,6 +34,7 @@ import ( authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" @@ -49,6 +51,7 @@ import ( extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" @@ -82,6 +85,7 @@ type Interface interface { AuthorizationV1() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface + AutoscalingV2() autoscalingv2.AutoscalingV2Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface @@ -98,6 +102,7 @@ type Interface interface { ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface + FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1() nodev1.NodeV1Interface @@ -131,6 +136,7 @@ type Clientset struct { authorizationV1 *authorizationv1.AuthorizationV1Client authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2 *autoscalingv2.AutoscalingV2Client autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client batchV1 
*batchv1.BatchV1Client @@ -147,6 +153,7 @@ type Clientset struct { extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client + flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client nodeV1 *nodev1.NodeV1Client @@ -220,6 +227,11 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } +// AutoscalingV2 retrieves the AutoscalingV2Client +func (c *Clientset) AutoscalingV2() autoscalingv2.AutoscalingV2Interface { + return c.autoscalingV2 +} + // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -300,6 +312,11 @@ func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1In return c.flowcontrolV1beta1 } +// FlowcontrolV1beta2 retrieves the FlowcontrolV1beta2Client +func (c *Clientset) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface { + return c.flowcontrolV1beta2 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 @@ -391,190 +408,217 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
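The point of NewForConfigAndClient is that one http.Client (and therefore one transport) is shared by every group client inside the Clientset. A minimal sketch under the assumption that the config comes from a kubeconfig file; the path parameter is hypothetical.

// Sketch only; kubeconfigPath is a hypothetical input.
import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func newClientsetSharingTransport(kubeconfigPath string) (*kubernetes.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return nil, err
	}
	// Build the HTTP client once; every typed group client below reuses it.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfigAndClient(cfg, httpClient)
}

// The resulting Clientset also exposes the new groups from this bump, e.g.
//   cs.FlowcontrolV1beta2().FlowSchemas().List(context.TODO(), metav1.ListOptions{})
//   cs.AutoscalingV2().HorizontalPodAutoscalers("default").List(context.TODO(), metav1.ListOptions{})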
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.internalV1alpha1, err = internalv1alpha1.NewForConfig(&configShallowCopy) + cs.internalV1alpha1, err = internalv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) + cs.appsV1beta1, err = appsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy) + cs.appsV1beta2, err = appsv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) + cs.authenticationV1, err = authenticationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy) + cs.authorizationV1, err = authorizationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) + cs.autoscalingV1, err = autoscalingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy) + cs.autoscalingV2, err = autoscalingv2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfig(&configShallowCopy) + cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) + cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.batchV1beta1, err = 
batchv1beta1.NewForConfig(&configShallowCopy) + cs.batchV1, err = batchv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.certificatesV1, err = certificatesv1.NewForConfig(&configShallowCopy) + cs.batchV1beta1, err = batchv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + cs.certificatesV1, err = certificatesv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfig(&configShallowCopy) + cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) + cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) + cs.coordinationV1, err = coordinationv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.discoveryV1, err = discoveryv1.NewForConfig(&configShallowCopy) + cs.coreV1, err = corev1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfig(&configShallowCopy) + cs.discoveryV1, err = discoveryv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.eventsV1, err = eventsv1.NewForConfig(&configShallowCopy) + cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + cs.eventsV1, err = eventsv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + cs.eventsV1beta1, err = eventsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfig(&configShallowCopy) + cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1beta1, err = flowcontrolv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) + cs.flowcontrolV1beta2, err = flowcontrolv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1, err = nodev1.NewForConfig(&configShallowCopy) + cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + cs.nodeV1, err = 
nodev1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.policyV1, err = policyv1.NewForConfig(&configShallowCopy) + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) + cs.nodeV1beta1, err = nodev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy) + cs.policyV1, err = policyv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1beta1, err = rbacv1beta1.NewForConfig(&configShallowCopy) + cs.policyV1beta1, err = policyv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + cs.rbacV1, err = rbacv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + cs.rbacV1beta1, err = rbacv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy) + cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) + cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy) + cs.schedulingV1, err = schedulingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + cs.storageV1beta1, err = storagev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.storageV1, err = storagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -584,53 +628,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) - cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) - cs.internalV1alpha1 = internalv1alpha1.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) - cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) - cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) - cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) - cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) - cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c) - cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) - cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c) - cs.autoscalingV2beta2 = autoscalingv2beta2.NewForConfigOrDie(c) - cs.batchV1 = batchv1.NewForConfigOrDie(c) - cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c) - cs.certificatesV1 = certificatesv1.NewForConfigOrDie(c) - cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) - cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) - cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) - cs.coreV1 = corev1.NewForConfigOrDie(c) - cs.discoveryV1 = discoveryv1.NewForConfigOrDie(c) - cs.discoveryV1beta1 = discoveryv1beta1.NewForConfigOrDie(c) - cs.eventsV1 = eventsv1.NewForConfigOrDie(c) - cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) - cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) - cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) - cs.flowcontrolV1beta1 = flowcontrolv1beta1.NewForConfigOrDie(c) - cs.networkingV1 = networkingv1.NewForConfigOrDie(c) - cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) - cs.nodeV1 = nodev1.NewForConfigOrDie(c) - cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) - cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) - cs.policyV1 = policyv1.NewForConfigOrDie(c) - cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) - cs.rbacV1 = rbacv1.NewForConfigOrDie(c) - cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) - cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) - cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) - cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) - cs.storageV1 = storagev1.NewForConfigOrDie(c) - cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. 
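The constructors above let a caller build the shared *http.Client once and reuse its transport across every typed group client in the Clientset. A minimal usage sketch under that assumption (the in-cluster config and "default" namespace below are illustrative choices, not taken from this change):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Load a rest.Config; the in-cluster variant is used purely as an example.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// Build the HTTP client once so all typed clients share one transport
	// (connection pool, TLS configuration, proxy settings).
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}

	// Equivalent to kubernetes.NewForConfig(cfg), but with the client made explicit.
	clientset, err := kubernetes.NewForConfigAndClient(cfg, httpClient)
	if err != nil {
		panic(err)
	}

	pods, err := clientset.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods:", len(pods.Items))
}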
@@ -647,6 +649,7 @@ func New(c rest.Interface) *Clientset { cs.authorizationV1 = authorizationv1.New(c) cs.authorizationV1beta1 = authorizationv1beta1.New(c) cs.autoscalingV1 = autoscalingv1.New(c) + cs.autoscalingV2 = autoscalingv2.New(c) cs.autoscalingV2beta1 = autoscalingv2beta1.New(c) cs.autoscalingV2beta2 = autoscalingv2beta2.New(c) cs.batchV1 = batchv1.New(c) @@ -663,6 +666,7 @@ func New(c rest.Interface) *Clientset { cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) + cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1 = nodev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 6f6fc67ae..9ab84ff5d 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -46,6 +46,8 @@ import ( fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake" + autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" + fakeautoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake" autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" fakeautoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake" autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" @@ -78,6 +80,8 @@ import ( fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" fakeflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" + flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" @@ -218,6 +222,11 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} } +// AutoscalingV2 retrieves the AutoscalingV2Client +func (c *Clientset) AutoscalingV2() autoscalingv2.AutoscalingV2Interface { + return &fakeautoscalingv2.FakeAutoscalingV2{Fake: &c.Fake} +} + // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return &fakeautoscalingv2beta1.FakeAutoscalingV2beta1{Fake: &c.Fake} @@ -298,6 +307,11 @@ func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1In return &fakeflowcontrolv1beta1.FakeFlowcontrolV1beta1{Fake: &c.Fake} } +// FlowcontrolV1beta2 retrieves the FlowcontrolV1beta2Client +func (c *Clientset) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface { + return &fakeflowcontrolv1beta2.FakeFlowcontrolV1beta2{Fake: &c.Fake} +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go 
b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 789d6428f..c3f0a3d52 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -30,6 +30,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" @@ -46,6 +47,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" @@ -84,6 +86,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ authorizationv1.AddToScheme, authorizationv1beta1.AddToScheme, autoscalingv1.AddToScheme, + autoscalingv2.AddToScheme, autoscalingv2beta1.AddToScheme, autoscalingv2beta2.AddToScheme, batchv1.AddToScheme, @@ -100,6 +103,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, flowcontrolv1beta1.AddToScheme, + flowcontrolv1beta2.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index a46fb2962..b41466151 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -30,6 +30,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" @@ -46,6 +47,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" @@ -84,6 +86,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ authorizationv1.AddToScheme, authorizationv1beta1.AddToScheme, autoscalingv1.AddToScheme, + autoscalingv2.AddToScheme, autoscalingv2beta1.AddToScheme, autoscalingv2beta2.AddToScheme, batchv1.AddToScheme, @@ -100,6 +103,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ extensionsv1beta1.AddToScheme, flowcontrolv1alpha1.AddToScheme, flowcontrolv1beta1.AddToScheme, + flowcontrolv1beta2.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index 751273159..10848bed1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "net/http" + v1 "k8s.io/api/admissionregistration/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() Valida } // NewForConfig creates a new AdmissionregistrationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index cda5c5d63..bbd4cf007 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -102,7 +102,7 @@ func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutating // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &admissionregistrationv1.MutatingWebhookConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index 8cf195612..ee769dd3c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -102,7 +102,7 @@ func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, valida // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &admissionregistrationv1.ValidatingWebhookConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 2d93ff02e..8fda84b1d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() V } // NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index c28115631..a06bb7db1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -102,7 +102,7 @@ func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutating // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1beta1.MutatingWebhookConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index ac87db44a..27495b31d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -102,7 +102,7 @@ func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, valida // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1beta1.ValidatingWebhookConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index e43a9a368..1794cb941 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *InternalV1alpha1Client) StorageVersions() StorageVersionInterface { } // NewForConfig creates a new InternalV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*InternalV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new InternalV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*InternalV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go index 71617dc25..8b6a84d7a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -113,7 +113,7 @@ func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion * // Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootDeleteActionWithOptions(storageversionsResource, name, opts), &v1alpha1.StorageVersion{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 621c734af..397542eeb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/apps/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { } // NewForConfig creates a new AppsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AppsV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index c7ba0c50a..1116d6dff 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -108,7 +108,7 @@ func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{}) + Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &appsv1.ControllerRevision{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index ce69673bc..00c28df9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -120,7 +120,7 @@ func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *appsv1.Dae // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{}) + Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &appsv1.DaemonSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 72587cc3f..8899824aa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -122,7 +122,7 @@ func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *appsv1.D // Delete takes name of the deployment and deletes it. Returns an error if one occurs. func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &appsv1.Deployment{}) + Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &appsv1.Deployment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 674b13487..39acfafeb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -122,7 +122,7 @@ func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *appsv1.R // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{}) + Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &appsv1.ReplicaSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 14ba5fb9a..e04eb025d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -122,7 +122,7 @@ func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *appsv1 // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{}) + Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &appsv1.StatefulSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index e5dd64d98..6b7148c5a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface } // NewForConfig creates a new AppsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 5cf7bf16c..a4fb08c90 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -108,7 +108,7 @@ func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) + Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta1.ControllerRevision{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 2ab2ce5ff..2db29b481 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -120,7 +120,7 @@ func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1. // Delete takes name of the deployment and deletes it. Returns an error if one occurs. func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index a7c8db240..b756a3239 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -120,7 +120,7 @@ func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta1.StatefulSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 7ca4e0b20..968abc56f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "net/http" + v1beta2 "k8s.io/api/apps/v1beta2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface } // NewForConfig creates a new AppsV1beta2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AppsV1beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AppsV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 90015d915..f3d71bf77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -108,7 +108,7 @@ func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) + Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta2.ControllerRevision{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index ef29e2ff8..3ee0f7239 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -120,7 +120,7 @@ func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.Da // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) + Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta2.DaemonSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index e11b3ffd3..38fb2a67c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -120,7 +120,7 @@ func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2. // Delete takes name of the deployment and deletes it. Returns an error if one occurs. func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) + Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta2.Deployment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 713b1ec6e..2fb89cee0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -120,7 +120,7 @@ func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2. // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) + Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta2.ReplicaSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index 681132307..cc9903c0f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -120,7 +120,7 @@ func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. 
func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) + Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta2.StatefulSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index de8864e22..aea9d0e13 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/authentication/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { } // NewForConfig creates a new AuthenticationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthenticationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 816bd0a2c..218cb60c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/authentication/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { } // NewForConfig creates a new AuthenticationV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthenticationV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index 2cc226322..edfc90346 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/authorization/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterf } // NewForConfig creates a new AuthorizationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthorizationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 88eac75b7..23b0edf27 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/authorization/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewI } // NewForConfig creates a new AuthorizationV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AuthorizationV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthorizationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 4f3e96aec..f3a2752cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/autoscaling/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) Horizon } // NewForConfig creates a new AutoscalingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AutoscalingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 2c54f08ef..dcd47480b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -120,7 +120,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizon // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &autoscalingv1.HorizontalPodAutoscaler{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go new file mode 100644 index 000000000..04d5d0f94 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2 + +import ( + "net/http" + + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV2Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AutoscalingV2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AutoscalingV2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AutoscalingV2Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2Client { + return &AutoscalingV2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go new file mode 100644 index 000000000..86fb4bf58 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go new file mode 100644 index 000000000..d4b907f4b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAutoscalingV2 struct { + *testing.Fake +} + +func (c *FakeAutoscalingV2) HorizontalPodAutoscalers(namespace string) v2.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAutoscalingV2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 000000000..ca4b24704 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,190 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + testing "k8s.io/client-go/testing" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeAutoscalingV2 + ns string +} + +var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2", Resource: "horizontalpodautoscalers"} + +var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2.HorizontalPodAutoscalerList).ListMeta} + for _, item := range obj.(*v2.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
+func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2.HorizontalPodAutoscaler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{}) + return err +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
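
The Apply and ApplyStatus methods added here (the fake implementation continues just below, the real client further down) accept the generated apply configurations from k8s.io/client-go/applyconfigurations and submit them as server-side apply patches. As a hedged sketch, not part of the vendored code: the field manager name and the specific With* builders below are assumptions based on the generated builder pattern.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2"
)

// applyHPA declares the desired state of a HorizontalPodAutoscaler and lets
// the API server merge it (server-side apply) under the given field manager.
func applyHPA(ctx context.Context, client *autoscalingv2.AutoscalingV2Client) error {
	// Names ("web", "default", "example-operator") are illustrative assumptions.
	hpa := acautoscalingv2.HorizontalPodAutoscaler("web", "default").
		WithSpec(acautoscalingv2.HorizontalPodAutoscalerSpec().
			WithScaleTargetRef(acautoscalingv2.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("web")).
			WithMinReplicas(2).
			WithMaxReplicas(10))

	result, err := client.HorizontalPodAutoscalers("default").
		Apply(ctx, hpa, metav1.ApplyOptions{FieldManager: "example-operator", Force: true})
	if err != nil {
		return err
	}
	fmt.Printf("applied %s (maxReplicas=%d)\n", result.Name, result.Spec.MaxReplicas)
	return nil
}
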
+func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { + if horizontalPodAutoscaler == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") + } + data, err := json.Marshal(horizontalPodAutoscaler) + if err != nil { + return nil, err + } + name := horizontalPodAutoscaler.Name + if name == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { + if horizontalPodAutoscaler == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") + } + data, err := json.Marshal(horizontalPodAutoscaler) + if err != nil { + return nil, err + } + name := horizontalPodAutoscaler.Name + if name == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2.HorizontalPodAutoscaler), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go new file mode 100644 index 000000000..0470400fd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go new file mode 100644 index 000000000..3a077d71d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*v2.HorizontalPodAutoscalerList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(horizontalPodAutoscaler). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(horizontalPodAutoscaler). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(horizontalPodAutoscaler). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(&opts). + Do(ctx). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { + if horizontalPodAutoscaler == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(horizontalPodAutoscaler) + if err != nil { + return nil, err + } + name := horizontalPodAutoscaler.Name + if name == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") + } + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { + if horizontalPodAutoscaler == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(horizontalPodAutoscaler) + if err != nil { + return nil, err + } + + name := horizontalPodAutoscaler.Name + if name == nil { + return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") + } + + result = &v2.HorizontalPodAutoscaler{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index c1a91fc3e..d1dde5ed1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta1 import ( + "net/http" + v2beta1 "k8s.io/api/autoscaling/v2beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) Ho } // NewForConfig creates a new AutoscalingV2beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AutoscalingV2beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 4967ca8f0..2a750abdd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -120,7 +120,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizon // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta1.HorizontalPodAutoscaler{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index bd2b39270..cae1b4e43 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v2beta2 import ( + "net/http" + v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *AutoscalingV2beta2Client) HorizontalPodAutoscalers(namespace string) Ho } // NewForConfig creates a new AutoscalingV2beta2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*AutoscalingV2beta2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AutoscalingV2beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV2beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index d00334340..00d642d85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -120,7 +120,7 @@ func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizon // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta2.HorizontalPodAutoscaler{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index ba414eebc..eee144f71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/batch/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *BatchV1Client) Jobs(namespace string) JobInterface { } // NewForConfig creates a new BatchV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*BatchV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new BatchV1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BatchV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go index 05441e4cf..39f4ca023 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go @@ -120,7 +120,7 @@ func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *batchv1.CronJo // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &batchv1.CronJob{}) + Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &batchv1.CronJob{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index 8da3bb13c..c44cddb3c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -120,7 +120,7 @@ func (c *FakeJobs) UpdateStatus(ctx context.Context, job *batchv1.Job, opts v1.U // Delete takes name of the job and deletes it. Returns an error if one occurs. func (c *FakeJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &batchv1.Job{}) + Invokes(testing.NewDeleteActionWithOptions(jobsResource, c.ns, name, opts), &batchv1.Job{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index 257085358..ebbf063ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/batch/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface { } // NewForConfig creates a new BatchV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new BatchV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
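
The same NewForConfig/NewForConfigAndClient split recurs for every typed client in this update (the BatchV1beta1 implementation continues just below). A minimal sketch of how a caller might build one http.Client with rest.HTTPClientFor and share it across several typed clients so they reuse a single transport and connection pool; the helper name newClients and the kubeconfig handling are illustrative assumptions, not part of client-go.

package main

import (
	autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2"
	batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// newClients builds one http.Client from the rest.Config and hands it to each
// typed client, so they share a transport instead of constructing one apiece.
func newClients(kubeconfig string) (*autoscalingv2.AutoscalingV2Client, *batchv1.BatchV1Client, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, nil, err
	}
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		return nil, nil, err
	}
	hpaClient, err := autoscalingv2.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	jobClient, err := batchv1.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	return hpaClient, jobClient, nil
}
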
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BatchV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 959b5cfe5..a8bad764f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -120,7 +120,7 @@ func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJo // Delete takes name of the cronJob and deletes it. Returns an error if one occurs. func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) + Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1beta1.CronJob{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go index 25aea93c7..6d87c539e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/certificates/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *CertificatesV1Client) CertificateSigningRequests() CertificateSigningRe } // NewForConfig creates a new CertificatesV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*CertificatesV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go index acb9e579e..90a1e6dc6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go @@ -113,7 +113,7 @@ func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certi // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. 
func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &certificatesv1.CertificateSigningRequest{}) + Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &certificatesv1.CertificateSigningRequest{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index 1c52d551b..fa97b441d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/certificates/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSign } // NewForConfig creates a new CertificatesV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 68a1627d6..5a416150a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -113,7 +113,7 @@ func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certi // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1beta1.CertificateSigningRequest{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go index 0df7b71bf..e19469d53 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/coordination/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { } // NewForConfig creates a new CoordinationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index a27f4765e..c52c828ef 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -108,7 +108,7 @@ func (c *FakeLeases) Update(ctx context.Context, lease *coordinationv1.Lease, op // Delete takes name of the lease and deletes it. Returns an error if one occurs. func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(leasesResource, c.ns, name), &coordinationv1.Lease{}) + Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &coordinationv1.Lease{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index d68ed5d34..27d674e23 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/coordination/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *CoordinationV1beta1Client) Leases(namespace string) LeaseInterface { } // NewForConfig creates a new CoordinationV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*CoordinationV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 303f765c2..15b6c401d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -108,7 +108,7 @@ func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.U // Delete takes name of the lease and deletes it. Returns an error if one occurs. func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(leasesResource, c.ns, name), &v1beta1.Lease{}) + Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1beta1.Lease{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 428d2afa3..6e59e4cc6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -114,12 +116,28 @@ func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface } // NewForConfig creates a new CoreV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*CoreV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoreV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
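
The CoreV1Client constructor continues below; the core/v1 hunks also touch event_expansion.go, where UpdateWithEventNamespace now rejects an event whose namespace differs from the namespace the client is scoped to. A small hedged illustration (the function and client wiring are assumed for the example):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// updateEvent assumes the events client is scoped to "kube-system" while the
// event claims namespace "default"; with the new guard this fails fast instead
// of sending the request to the wrong namespace.
func updateEvent(events typedcorev1.EventInterface) {
	ev := &corev1.Event{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if _, err := events.UpdateWithEventNamespace(ev); err != nil {
		// e.g. "can't update an event with namespace 'default' in namespace 'kube-system'"
		fmt.Println(err)
	}
}
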
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 211cf0603..a3fdf57a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -34,6 +34,7 @@ type EventExpansion interface { CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // PatchWithEventNamespace is the same as a Patch, except that it sends the request to the event.Namespace. PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) // Search finds events about the specified object Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) @@ -66,6 +67,9 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // created with the "" namespace. Update also requires the ResourceVersion to be set in the event // object. func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } result := &v1.Event{} err := e.client.Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index a80fb38dc..7e5c02daa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -102,7 +102,7 @@ func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *cor // Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &corev1.ComponentStatus{}) + Invokes(testing.NewRootDeleteActionWithOptions(componentstatusesResource, name, opts), &corev1.ComponentStatus{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index ba0058d61..b74b376a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -108,7 +108,7 @@ func (c *FakeConfigMaps) Update(ctx context.Context, configMap *corev1.ConfigMap // Delete takes name of the configMap and deletes it. Returns an error if one occurs. func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &corev1.ConfigMap{}) + Invokes(testing.NewDeleteActionWithOptions(configmapsResource, c.ns, name, opts), &corev1.ConfigMap{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index c9484ad24..e9a515a1a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -108,7 +108,7 @@ func (c *FakeEndpoints) Update(ctx context.Context, endpoints *corev1.Endpoints, // Delete takes name of the endpoints and deletes it. Returns an error if one occurs. func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &corev1.Endpoints{}) + Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &corev1.Endpoints{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 71e34a948..951883943 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -108,7 +108,7 @@ func (c *FakeEvents) Update(ctx context.Context, event *corev1.Event, opts v1.Up // Delete takes name of the event and deletes it. Returns an error if one occurs. func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &corev1.Event{}) + Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &corev1.Event{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 8c6c11ca1..7487285e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -108,7 +108,7 @@ func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *corev1.LimitRa // Delete takes name of the limitRange and deletes it. Returns an error if one occurs. func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &corev1.LimitRange{}) + Invokes(testing.NewDeleteActionWithOptions(limitrangesResource, c.ns, name, opts), &corev1.LimitRange{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 6b56afecb..83ada9f72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -113,7 +113,7 @@ func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *corev1.Nam // Delete takes name of the namespace and deletes it. Returns an error if one occurs. func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(namespacesResource, name), &corev1.Namespace{}) + Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &corev1.Namespace{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 7a4071ab0..82816deb4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -113,7 +113,7 @@ func (c *FakeNodes) UpdateStatus(ctx context.Context, node *corev1.Node, opts v1 // Delete takes name of the node and deletes it. Returns an error if one occurs. func (c *FakeNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(nodesResource, name), &corev1.Node{}) + Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &corev1.Node{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index b2b5b954f..d071a45db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -113,7 +113,7 @@ func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolu // Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &corev1.PersistentVolume{}) + Invokes(testing.NewRootDeleteActionWithOptions(persistentvolumesResource, name, opts), &corev1.PersistentVolume{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index 952d4decb..6e4cef260 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -120,7 +120,7 @@ func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persisten // Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewDeleteActionWithOptions(persistentvolumeclaimsResource, c.ns, name, opts), &corev1.PersistentVolumeClaim{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 94312ce33..38a147955 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -120,7 +120,7 @@ func (c *FakePods) UpdateStatus(ctx context.Context, pod *corev1.Pod, opts v1.Up // Delete takes name of the pod and deletes it. Returns an error if one occurs. func (c *FakePods) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &corev1.Pod{}) + Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &corev1.Pod{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 40c91611f..00711f36f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -108,7 +108,7 @@ func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *corev1.PodTe // Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(podtemplatesResource, c.ns, name, opts), &corev1.PodTemplate{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 01cfd9088..086f4dbf9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -121,7 +121,7 @@ func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicati // Delete takes name of the replicationController and deletes it. Returns an error if one occurs. func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{}) + Invokes(testing.NewDeleteActionWithOptions(replicationcontrollersResource, c.ns, name, opts), &corev1.ReplicationController{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index c8af473f7..9e008a973 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -120,7 +120,7 @@ func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *co // Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{}) + Invokes(testing.NewDeleteActionWithOptions(resourcequotasResource, c.ns, name, opts), &corev1.ResourceQuota{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index b20c5edf4..b46730533 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -108,7 +108,7 @@ func (c *FakeSecrets) Update(ctx context.Context, secret *corev1.Secret, opts v1 // Delete takes name of the secret and deletes it. Returns an error if one occurs. 
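For context, the repeated switch from testing.NewDeleteAction to testing.NewDeleteActionWithOptions means the fake clientsets now carry the DeleteOptions passed to Delete on the recorded action, so tests can assert on them. The sketch below is illustrative only and not part of the vendored diff; the resource names and the clienttesting.DeleteActionImpl type assertion are assumptions based on client-go's testing package in this release.

package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func TestDeleteRecordsOptions(t *testing.T) {
	// Seed the fake clientset with one Secret so the delete succeeds.
	clientset := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "token"},
	})
	gracePeriod := int64(0)

	// The typed fake client now forwards opts into the recorded delete action.
	if err := clientset.CoreV1().Secrets("ns").Delete(context.TODO(), "token",
		metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
		t.Fatal(err)
	}

	actions := clientset.Actions()
	del, ok := actions[len(actions)-1].(clienttesting.DeleteActionImpl)
	if !ok {
		t.Fatalf("expected a delete action, got %T", actions[len(actions)-1])
	}
	if del.DeleteOptions.GracePeriodSeconds == nil || *del.DeleteOptions.GracePeriodSeconds != 0 {
		t.Fatalf("grace period not recorded: %+v", del.DeleteOptions)
	}
}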
func (c *FakeSecrets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &corev1.Secret{}) + Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &corev1.Secret{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 1da10581e..bfbabec07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -120,7 +120,7 @@ func (c *FakeServices) UpdateStatus(ctx context.Context, service *corev1.Service // Delete takes name of the service and deletes it. Returns an error if one occurs. func (c *FakeServices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &corev1.Service{}) + Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &corev1.Service{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 487c6f0f0..ff468957a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -109,7 +109,7 @@ func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *corev1 // Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{}) + Invokes(testing.NewDeleteActionWithOptions(serviceaccountsResource, c.ns, name, opts), &corev1.ServiceAccount{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go index cb2632762..9041443b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/discovery/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *DiscoveryV1Client) EndpointSlices(namespace string) EndpointSliceInterf } // NewForConfig creates a new DiscoveryV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go index 9113061ff..20e58b151 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go @@ -108,7 +108,7 @@ func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *discover // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &discoveryv1.EndpointSlice{}) + Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &discoveryv1.EndpointSlice{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go index 997239a95..193d5e9eb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceI } // NewForConfig creates a new DiscoveryV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new DiscoveryV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 2019fdb44..249de3446 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -108,7 +108,7 @@ func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1. // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. 
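For context, every generated group client in this bump gains the NewForConfigAndClient constructor described by the new doc comments, so a caller can build one *http.Client with rest.HTTPClientFor and share it across several group clients. The sketch below is illustrative only and not part of the vendored diff; the choice of the discovery and events group clients and the helper name are assumptions.

package example

import (
	discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1"
	eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1"
	"k8s.io/client-go/rest"
)

// newSharedClients builds one HTTP client and reuses it for two group clients,
// so both share a single transport and connection pool.
func newSharedClients(config *rest.Config) (*discoveryv1.DiscoveryV1Client, *eventsv1.EventsV1Client, error) {
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		return nil, nil, err
	}
	// Per the new doc comments, the provided http.Client takes precedence over
	// the transport-related fields already set on the config.
	dc, err := discoveryv1.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	ec, err := eventsv1.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	return dc, ec, nil
}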
func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1beta1.EndpointSlice{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go index 9230ca5ca..8c73918d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/events/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *EventsV1Client) Events(namespace string) EventInterface { } // NewForConfig creates a new EventsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*EventsV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new EventsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go index 3e7065d31..87ff13c82 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go @@ -108,7 +108,7 @@ func (c *FakeEvents) Update(ctx context.Context, event *eventsv1.Event, opts v1. // Delete takes name of the event and deletes it. Returns an error if one occurs. func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &eventsv1.Event{}) + Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &eventsv1.Event{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index e0ae41dfe..7213193bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -33,7 +33,7 @@ type EventExpansion interface { // UpdateWithEventNamespace is the same as a Update // except that it sends the request to the event.Namespace. UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) - // PatchWithEventNamespace is the same as an Update + // PatchWithEventNamespace is the same as a Patch // except that it sends the request to the event.Namespace. 
PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index e372ccffa..66506bf88 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/events/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *EventsV1beta1Client) Events(namespace string) EventInterface { } // NewForConfig creates a new EventsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new EventsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index a8e0a023d..5481c4d3f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -108,7 +108,7 @@ func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.U // Delete takes name of the event and deletes it. Returns an error if one occurs. func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1beta1.Event{}) + Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1beta1.Event{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index e3b22aa44..827b514df 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -64,12 +66,28 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf } // NewForConfig creates a new ExtensionsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ExtensionsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index f83c312e6..11c05f195 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -120,7 +120,7 @@ func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.Da // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta1.DaemonSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index d9506650d..9edf85d60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -120,7 +120,7 @@ func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1. // Delete takes name of the deployment and deletes it. Returns an error if one occurs. func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 46db8d067..d3bbd1abc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -120,7 +120,7 @@ func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingre // Delete takes name of the ingress and deletes it. Returns an error if one occurs. func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index 4ae650b0d..8faa9e6ce 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -108,7 +108,7 @@ func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1 // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1beta1.NetworkPolicy{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index c4d0e9c53..dd53fd5bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -102,7 +102,7 @@ func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootDeleteActionWithOptions(podsecuritypoliciesResource, name, opts), &v1beta1.PodSecurityPolicy{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index a93deca7e..d6d35e493 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -120,7 +120,7 @@ func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1. // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta1.ReplicaSet{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go index 824318c88..06730a1ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go @@ -113,7 +113,7 @@ func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1 // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1alpha1.FlowSchema{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go index f6f7c8a5b..0e7228254 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go @@ -113,7 +113,7 @@ func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, prio // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1alpha1.PriorityLevelConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go index 37a1ff2d7..c6f2d9405 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelC } // NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go index 378c666d6..58d5ed14e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -113,7 +113,7 @@ func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1. // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta1.FlowSchema{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go index fe6850b30..19fc62beb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -113,7 +113,7 @@ func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, prio // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta1.PriorityLevelConfiguration{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go index 9a8ba560e..c29cfca95 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/flowcontrol/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *FlowcontrolV1beta1Client) PriorityLevelConfigurations() PriorityLevelCo } // NewForConfig creates a new FlowcontrolV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*FlowcontrolV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go new file mode 100644 index 000000000..56518ef7f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go new file mode 100644 index 000000000..9f36b3b7a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeFlowcontrolV1beta2 struct { + *testing.Fake +} + +func (c *FakeFlowcontrolV1beta2) FlowSchemas() v1beta2.FlowSchemaInterface { + return &FakeFlowSchemas{c} +} + +func (c *FakeFlowcontrolV1beta2) PriorityLevelConfigurations() v1beta2.PriorityLevelConfigurationInterface { + return &FakePriorityLevelConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFlowcontrolV1beta2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go new file mode 100644 index 000000000..e4603eba7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go @@ -0,0 +1,179 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + testing "k8s.io/client-go/testing" +) + +// FakeFlowSchemas implements FlowSchemaInterface +type FakeFlowSchemas struct { + Fake *FakeFlowcontrolV1beta2 +} + +var flowschemasResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "flowschemas"} + +var flowschemasKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta2.FlowSchemaList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.FlowSchemaList{ListMeta: obj.(*v1beta2.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1beta2.FlowSchemaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta2.FlowSchema{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{}) + return err +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. +func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.FlowSchema{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.FlowSchema), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go new file mode 100644 index 000000000..e1979d075 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go @@ -0,0 +1,179 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + testing "k8s.io/client-go/testing" +) + +// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface +type FakePriorityLevelConfigurations struct { + Fake *FakeFlowcontrolV1beta2 +} + +var prioritylevelconfigurationsResource = schema.GroupVersionResource{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Resource: "prioritylevelconfigurations"} + +var prioritylevelconfigurationsKind = schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta2.PriorityLevelConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta2.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1beta2.PriorityLevelConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta2.PriorityLevelConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. +func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.PriorityLevelConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta2.PriorityLevelConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go new file mode 100644 index 000000000..f3cca4fc7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "net/http" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta2Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta2Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta2Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta2Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta2Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta2Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta2Client { + return &FlowcontrolV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FlowcontrolV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 000000000..3a1f12b6a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. 
+func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(flowSchema). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). 
+ VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { + result = &v1beta2.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + result = &v1beta2.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { + if flowSchema == nil { + return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(flowSchema) + if err != nil { + return nil, err + } + + name := flowSchema.Name + if name == nil { + return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") + } + + result = &v1beta2.FlowSchema{} + err = c.client.Patch(types.ApplyPatchType). + Resource("flowschemas"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go similarity index 68% rename from vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go index fa946f6f0..1d4477006 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/counter/noop.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package counter +// Code generated by client-gen. DO NOT EDIT. 
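The Apply and ApplyStatus methods generated above drive server-side apply through an apply configuration. A rough sketch of how a caller might use them, assuming the usual FlowSchema constructor in k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 (the field manager name and kubeconfig path are illustrative, and only metadata is set here; spec fields would normally be filled in with the generated With... builders):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
	flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := flowcontrolv1beta2.NewForConfigOrDie(cfg)

	// Declare the desired state; Apply marshals it and sends it as an
	// ApplyPatchType patch, and the server merges it under the field manager.
	fs := applyflowcontrolv1beta2.FlowSchema("example-flowschema")

	_, err = client.FlowSchemas().Apply(context.TODO(), fs, metav1.ApplyOptions{
		FieldManager: "example-controller", // required for server-side apply
		Force:        true,
	})
	if err != nil {
		panic(err)
	}
}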
-// NoOp is a GoRoutineCounter that does not actually count -type NoOp struct{} +package v1beta2 -var _ GoRoutineCounter = NoOp{} +type FlowSchemaExpansion interface{} -// Add would adjust the count, if a count were being kept -func (NoOp) Add(int) {} +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 000000000..f028869f1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). 
+ Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(priorityLevelConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + if priorityLevelConfiguration == nil { + return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(priorityLevelConfiguration) + if err != nil { + return nil, err + } + + name := priorityLevelConfiguration.Name + if name == nil { + return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") + } + + result = &v1beta2.PriorityLevelConfiguration{} + err = c.client.Patch(types.ApplyPatchType). + Resource("prioritylevelconfigurations"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go index f2feced2a..8ffcd7f97 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go @@ -120,7 +120,7 @@ func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *networkingv1. // Delete takes name of the ingress and deletes it. Returns an error if one occurs. func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &networkingv1.Ingress{}) + Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &networkingv1.Ingress{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go index 200d9f68b..fe320c4b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go @@ -102,7 +102,7 @@ func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *networkin // Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(ingressclassesResource, name), &networkingv1.IngressClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &networkingv1.IngressClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 5be9e8b57..88e05ba57 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -108,7 +108,7 @@ func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *network // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &networkingv1.NetworkPolicy{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index c83b93575..3b72a7ae9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/networking/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInte } // NewForConfig creates a new NetworkingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
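The new NewForConfigAndClient constructors added throughout this diff let several typed clients share a single *http.Client (and therefore one transport and connection pool) instead of each building their own. A sketch of that pattern with two of the typed clients touched in this change, again assuming an illustrative kubeconfig path:

package main

import (
	networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
	nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// Build one http.Client from the config's transport settings...
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}

	// ...and hand it to each typed client. Per the generated comment, the
	// provided client takes precedence over the config's transport values.
	netClient, err := networkingv1.NewForConfigAndClient(cfg, httpClient)
	if err != nil {
		panic(err)
	}
	nodeClient, err := nodev1.NewForConfigAndClient(cfg, httpClient)
	if err != nil {
		panic(err)
	}

	_ = netClient
	_ = nodeClient
}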
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 21f5f6245..349196c53 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -120,7 +120,7 @@ func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingre // Delete takes name of the ingress and deletes it. Returns an error if one occurs. func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go index a0bcebe6f..6ef8d1edd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go @@ -102,7 +102,7 @@ func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.I // Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(ingressclassesResource, name), &v1beta1.IngressClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1beta1.IngressClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 849ac219f..851634ed0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/networking/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { } // NewForConfig creates a new NetworkingV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go index c79a924ba..3a1aaf1b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -102,7 +102,7 @@ func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *nodev1.Ru // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &nodev1.RuntimeClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &nodev1.RuntimeClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go index 7f0da811b..844f9fc70 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/node/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NodeV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 22694eea3..4b6387943 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -102,7 +102,7 @@ func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1. // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1alpha1.RuntimeClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index e7acc27e4..2a197d58e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/node/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index 5aba91599..191162114 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -102,7 +102,7 @@ func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.R // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1beta1.RuntimeClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index b38d9acac..4f6802ffa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/node/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { } // NewForConfig creates a new NodeV1beta1Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NodeV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go index 5763782a9..8023e2cd6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go @@ -120,7 +120,7 @@ func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisrupti // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &policyv1.PodDisruptionBudget{}) + Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &policyv1.PodDisruptionBudget{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go index ecd29deb8..9bfd98aa9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/policy/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *PolicyV1Client) PodDisruptionBudgets(namespace string) PodDisruptionBud } // NewForConfig creates a new PolicyV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*PolicyV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new PolicyV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*PolicyV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 7c0352daa..d1c856754 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -120,7 +120,7 @@ func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisrupti // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1beta1.PodDisruptionBudget{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index a7dffc032..614d4e799 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -102,7 +102,7 @@ func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootDeleteActionWithOptions(podsecuritypoliciesResource, name, opts), &v1beta1.PodSecurityPolicy{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 8b8b22c6d..5b65c9c0a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -49,12 +51,28 @@ func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { } // NewForConfig creates a new PolicyV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new PolicyV1beta1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index b4950675b..21a32ca04 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *rbacv1.Clust // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &rbacv1.ClusterRole{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index 08d30e62d..c65399598 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &rbacv1.ClusterRoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index ae724c65e..4a70d0c35 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -108,7 +108,7 @@ func (c *FakeRoles) Update(ctx context.Context, role *rbacv1.Role, opts v1.Updat // Delete takes name of the role and deletes it. Returns an error if one occurs. func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &rbacv1.Role{}) + Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &rbacv1.Role{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index f47924378..0db37dd57 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -108,7 +108,7 @@ func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *rbacv1.RoleB // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. 
func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{}) + Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &rbacv1.RoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index 1bc0179c6..a02f0357d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "net/http" + v1 "k8s.io/api/rbac/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface { } // NewForConfig creates a new RbacV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*RbacV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 91015097a..7e8e3a9b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.Clu // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1alpha1.ClusterRole{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 4c7b77215..5c5e63278 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1alpha1.ClusterRoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 5e72ab7f1..cec3f099d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -108,7 +108,7 @@ func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.Upd // Delete takes name of the role and deletes it. Returns an error if one occurs. func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1alpha1.Role{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 65250f7eb..2d66494fd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -108,7 +108,7 @@ func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.Rol // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1alpha1.RoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index efbbc68be..cc5b309e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/rbac/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface } // NewForConfig creates a new RbacV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index e8a2ad352..2b4f60054 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.Clus // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1beta1.ClusterRole{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 6695b1c4e..379261ec8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -102,7 +102,7 @@ func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1beta1.ClusterRoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index b73fc56c2..44df9128a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -108,7 +108,7 @@ func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.Upda // Delete takes name of the role and deletes it. Returns an error if one occurs. func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{}) + Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1beta1.Role{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 2e3f6ab7f..5e90e6dbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -108,7 +108,7 @@ func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.Role // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. 
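The fake clients in this diff now record the DeleteOptions passed to Delete (NewDeleteActionWithOptions / NewRootDeleteActionWithOptions instead of the option-less constructors), so unit tests can assert on them. A sketch of what that enables, assuming the recorded action is the usual testing.DeleteActionImpl carrying the options:

package main

import (
	"context"
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	// Seed the fake clientset with one RoleBinding, then delete it with options.
	clientset := fake.NewSimpleClientset(&rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	})

	grace := int64(0)
	_ = clientset.RbacV1().RoleBindings("default").Delete(context.TODO(), "example",
		metav1.DeleteOptions{GracePeriodSeconds: &grace})

	// With the change above, the recorded delete action carries the options.
	for _, action := range clientset.Actions() {
		if del, ok := action.(k8stesting.DeleteActionImpl); ok {
			fmt.Printf("deleted %q with grace period %v\n",
				del.GetName(), *del.DeleteOptions.GracePeriodSeconds)
		}
	}
}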
func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1beta1.RoleBinding{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 4db94cfad..8dac5c1d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface } // NewForConfig creates a new RbacV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RbacV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index bc1819c27..30f82270e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -102,7 +102,7 @@ func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *schedul // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &schedulingv1.PriorityClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &schedulingv1.PriorityClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index 5028bac89..11fc4b9f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "net/http" + v1 "k8s.io/api/scheduling/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index c8090f06c..32b6a11cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -102,7 +102,7 @@ func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1alpha1.PriorityClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 83bc0b8a9..47fb774a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1alpha1Client for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index ae415da0f..8ff5a1bd6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -102,7 +102,7 @@ func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1 // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1beta1.PriorityClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 373f5cca8..dbaf69414 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/scheduling/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface { } // NewForConfig creates a new SchedulingV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SchedulingV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go index b001aaa94..42df475dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go @@ -102,7 +102,7 @@ func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *storagev1.CSIDri // Delete takes name of the cSIDriver and deletes it. 
Returns an error if one occurs. func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(csidriversResource, name), &storagev1.CSIDriver{}) + Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &storagev1.CSIDriver{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index 7089d5362..4041d5e63 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -102,7 +102,7 @@ func (c *FakeCSINodes) Update(ctx context.Context, cSINode *storagev1.CSINode, o // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(csinodesResource, name), &storagev1.CSINode{}) + Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &storagev1.CSINode{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 4b175356b..60895be5b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -102,7 +102,7 @@ func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *storagev1 // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &storagev1.StorageClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &storagev1.StorageClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index 63f53fd52..e48943f8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -113,7 +113,7 @@ func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachme // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &storagev1.VolumeAttachment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index f03beae85..b31862f43 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "net/http" + v1 "k8s.io/api/storage/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -54,12 +56,28 @@ func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go index 004f58449..f56ed7895 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go @@ -108,7 +108,7 @@ func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacit // Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(csistoragecapacitiesResource, c.ns, name), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1alpha1.CSIStorageCapacity{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index a9a8d76fb..eb6ec7983 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -113,7 +113,7 @@ func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachme // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1alpha1.VolumeAttachment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 968692300..c9bf11d76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "k8s.io/api/storage/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -44,12 +46,28 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index d4482f39e..c65fa7422 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -102,7 +102,7 @@ func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDrive // Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(csidriversResource, name), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1beta1.CSIDriver{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index 1bee83d70..1ffd49b3a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -102,7 +102,7 @@ func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opt // Delete takes name of the cSINode and deletes it. Returns an error if one occurs. func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(csinodesResource, name), &v1beta1.CSINode{}) + Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1beta1.CSINode{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go index e216b9905..1715884d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go @@ -108,7 +108,7 @@ func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacit // Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(csistoragecapacitiesResource, c.ns, name), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1beta1.CSIStorageCapacity{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 2cf03cb5a..f929c55f8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -102,7 +102,7 @@ func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.S // Delete takes name of the storageClass and deletes it. Returns an error if one occurs. func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1beta1.StorageClass{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 3ba1d539a..81df87769 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -113,7 +113,7 @@ func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachme // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1beta1.VolumeAttachment{}) return err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 19267b362..4c7604bd2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + "net/http" + v1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" @@ -59,12 +61,28 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { } // NewForConfig creates a new StorageV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StorageV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go new file mode 100644 index 000000000..97742b77b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v2 + +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerLister. +type HorizontalPodAutoscalerListerExpansion interface{} + +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerNamespaceLister. +type HorizontalPodAutoscalerNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go new file mode 100644 index 000000000..a5cef2772 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v2 + +import ( + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. +// All objects returned here must be treated as read-only. +type HorizontalPodAutoscalerLister interface { + // List lists all HorizontalPodAutoscalers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister + HorizontalPodAutoscalerListerExpansion +} + +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. +type horizontalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { + return &horizontalPodAutoscalerLister{indexer: indexer} +} + +// List lists all HorizontalPodAutoscalers in the indexer. +func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { + return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. +// All objects returned here must be treated as read-only. +type HorizontalPodAutoscalerNamespaceLister interface { + // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v2.HorizontalPodAutoscaler, error) + HorizontalPodAutoscalerNamespaceListerExpansion +} + +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister +// interface. +type horizontalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. +func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
+func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2.HorizontalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2.Resource("horizontalpodautoscaler"), name) + } + return obj.(*v2.HorizontalPodAutoscaler), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go new file mode 100644 index 000000000..b658de654 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. +type PriorityLevelConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go new file mode 100644 index 000000000..2710f2630 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +// All objects returned here must be treated as read-only. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta2.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. 
+func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1beta2.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("flowschema"), name) + } + return obj.(*v1beta2.FlowSchema), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go new file mode 100644 index 000000000..00ede0070 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/flowcontrol/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +// All objects returned here must be treated as read-only. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta2.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. 
+func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta2.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1beta2.PriorityLevelConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go index 39e7ef259..277d9d93e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go index 60ab25d81..893933c45 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go index cce2e603a..dac177e93 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index b0e503af4..fc59decef 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go index a73d31b3f..ce614c0b8 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go index dd621a3ac..5070cb91b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 8daed8052..c82993897 100644 --- 
a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go index 3a72ece0c..d7a801c27 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go index 73e63fc11..198b5be4a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index 045b07f5b..3103629f6 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 7a0984626..e405e3dc1 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -38,7 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/clock" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/install" clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1" @@ -49,6 +49,7 @@ import ( "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -316,11 +317,17 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { return nil } +var _ utilnet.RoundTripperWrapper = &roundTripper{} + type roundTripper struct { a *Authenticator base http.RoundTripper } +func (r *roundTripper) WrappedRoundTripper() http.RoundTripper { + return r.base +} + func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // If a user has already set credentials, use that. This makes commands like // "kubectl get --token (token) pods" work. 
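The NewForConfigAndClient constructors added to the typed clients above all follow the same pattern, so a caller can build the transport-backed http.Client once with rest.HTTPClientFor and share it across API groups instead of letting every typed client construct its own. A minimal sketch of that usage, assuming a populated rest.Config (the pairing of the scheduling and storage clients is purely illustrative):

package example

import (
	schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1"
	storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
	rest "k8s.io/client-go/rest"
)

// newSharedClients builds two typed clients on top of a single http.Client,
// so they share one transport and connection pool.
func newSharedClients(config *rest.Config) (*schedulingv1.SchedulingV1Client, *storagev1.StorageV1Client, error) {
	// HTTPClientFor applies the TLS, auth and timeout settings from the config
	// exactly once; NewForConfig is now equivalent to this call followed by
	// NewForConfigAndClient.
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		return nil, nil, err
	}
	scheduling, err := schedulingv1.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	storage, err := storagev1.NewForConfigAndClient(config, httpClient)
	if err != nil {
		return nil, nil, err
	}
	return scheduling, storage, nil
}

As the added doc comments note, the provided http.Client takes precedence over the transport values carried in the config.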
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go index 967039662..ebfbd715c 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go @@ -1,3 +1,4 @@ +//go:build !providerless // +build !providerless /* diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 3735750bb..4909dc53a 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -202,6 +202,8 @@ func (c *Config) String() string { type ImpersonationConfig struct { // UserName is the username to impersonate on each request. UserName string + // UID is a unique value that identifies the user. + UID string // Groups are the groups to impersonate on each request. Groups []string // Extra is a free-form field which can be used to link some authentication information @@ -303,6 +305,8 @@ type ContentConfig struct { // object. Note that a RESTClient may require fields that are optional when initializing a Client. // A RESTClient created by this method is generic - it expects to operate on an API that follows // the Kubernetes conventions, but may not be the Kubernetes API. +// RESTClientFor is equivalent to calling RESTClientForConfigAndClient(config, httpClient), +// where httpClient was generated with HTTPClientFor(config). func RESTClientFor(config *Config) (*RESTClient, error) { if config.GroupVersion == nil { return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") @@ -311,24 +315,40 @@ func RESTClientFor(config *Config) (*RESTClient, error) { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } + // Validate config.Host before constructing the transport/client so we can fail fast. + // ServerURL will be obtained later in RESTClientForConfigAndClient() + _, _, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + httpClient, err := HTTPClientFor(config) + if err != nil { + return nil, err + } + + return RESTClientForConfigAndClient(config, httpClient) +} + +// RESTClientForConfigAndClient returns a RESTClient that satisfies the requested attributes on a +// client Config object. +// Unlike RESTClientFor, RESTClientForConfigAndClient allows to pass an http.Client that is shared +// between all the API Groups and Versions. +// Note that the http client takes precedence over the transport values configured. +// The http client defaults to the `http.DefaultClient` if nil. 
+func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) if err != nil { return nil, err } - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } - } - rateLimiter := config.RateLimiter if rateLimiter == nil { qps := config.QPS @@ -369,24 +389,33 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } + // Validate config.Host before constructing the transport/client so we can fail fast. + // ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient() + _, _, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + httpClient, err := HTTPClientFor(config) + if err != nil { + return nil, err + } + + return UnversionedRESTClientForConfigAndClient(config, httpClient) +} + +// UnversionedRESTClientForConfigAndClient is the same as RESTClientForConfigAndClient, +// except that it allows the config.Version to be empty. +func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) if err != nil { return nil, err } - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - if config.Timeout > 0 { - httpClient.Timeout = config.Timeout - } - } - rateLimiter := config.RateLimiter if rateLimiter == nil { qps := config.QPS @@ -608,9 +637,10 @@ func CopyConfig(config *Config) *Config { BearerToken: config.BearerToken, BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ + UserName: config.Impersonate.UserName, + UID: config.Impersonate.UID, Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, - UserName: config.Impersonate.UserName, }, AuthProvider: config.AuthProvider, AuthConfigPersister: config.AuthConfigPersister, diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index e5a8100be..5cc9900b0 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -39,13 +39,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" - utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) var ( @@ -619,12 +619,12 @@ type throttleSettings struct { } type throttledLogger struct { - clock utilclock.PassiveClock + clock clock.PassiveClock settings []*throttleSettings } var 
globalThrottledLogger = &throttledLogger{ - clock: utilclock.RealClock{}, + clock: clock.RealClock{}, settings: []*throttleSettings{ { logLevel: 2, diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 87792750a..7c38c6d92 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -26,6 +26,27 @@ import ( "k8s.io/client-go/transport" ) +// HTTPClientFor returns an http.Client that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultClient if no special case behavior is needed. +func HTTPClientFor(config *Config) (*http.Client, error) { + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + var httpClient *http.Client + if transport != http.DefaultTransport || config.Timeout > 0 { + httpClient = &http.Client{ + Transport: transport, + Timeout: config.Timeout, + } + } else { + httpClient = http.DefaultClient + } + + return httpClient, nil +} + // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func TLSConfigFor(config *Config) (*tls.Config, error) { @@ -83,6 +104,7 @@ func (c *Config) TransportConfig() (*transport.Config, error) { BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, + UID: c.Impersonate.UID, Groups: c.Impersonate.Groups, Extra: c.Impersonate.Extra, }, diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index da4a1624e..86991be3b 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index b6b2c1f22..c8ae0aaf5 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -222,10 +222,15 @@ func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresourc } func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + return NewRootDeleteActionWithOptions(resource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Name = name + action.DeleteOptions = opts return action } @@ -241,11 +246,16 @@ func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subres } func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + return NewDeleteActionWithOptions(resource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Namespace = namespace action.Name = name + action.DeleteOptions = opts return action } @@ -391,6 +401,7 @@ type UpdateAction interface { type DeleteAction interface { Action GetName() string + GetDeleteOptions() metav1.DeleteOptions } type DeleteCollectionAction interface { @@ -583,17 
+594,23 @@ func (a PatchActionImpl) DeepCopy() Action { type DeleteActionImpl struct { ActionImpl - Name string + Name string + DeleteOptions metav1.DeleteOptions } func (a DeleteActionImpl) GetName() string { return a.Name } +func (a DeleteActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + func (a DeleteActionImpl) DeepCopy() Action { return DeleteActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + DeleteOptions: *a.DeleteOptions.DeepCopy(), } } diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index fe394d16c..c85c29db3 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -21,9 +21,9 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/clock" ) // This file implements a low-level controller that is used in @@ -322,7 +322,7 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) } // NewIndexerInformer returns an Indexer and a Controller for populating the index @@ -351,7 +351,59 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) +} + +// TransformFunc allows for transforming an object before it will be processed +// and put into the controller cache and before the corresponding handlers will +// be called on it. +// TransformFunc (similarly to ResourceEventHandler functions) should be able +// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. + +// The most common usage pattern is to clean up some parts of the object to +// reduce component memory usage if a given component doesn't care about them, +// for example fields that the given controller never reads. +type TransformFunc func(interface{}) (interface{}, error) + +// NewTransformingInformer returns a Store and a controller for populating +// the store while also providing event notifications. You should only use +// the returned Store for Get/List operations; Add/Modify/Deletes will cause +// the event notifications to be faulty. +// The given transform function will be called on all objects before they will +// be put into the Store and corresponding Add/Modify/Delete handlers will +// be invoked for them. +func NewTransformingInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + transformer TransformFunc, +) (Store, Controller) { + // This will hold the client state, as we know it. + clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) + + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) +} + +// NewTransformingIndexerInformer returns an Indexer and a controller for +// populating the index while also providing event notifications. You should +// only use the returned Index for Get/List operations; Add/Modify/Deletes +// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they will +// be put into the Index and corresponding Add/Modify/Delete handlers will +// be invoked for them. +func NewTransformingIndexerInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + indexers Indexers, + transformer TransformFunc, +) (Indexer, Controller) { + // This will hold the client state, as we know it. + clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) } // newInformer returns a controller for populating the store while also @@ -374,6 +426,7 @@ func newInformer( resyncPeriod time.Duration, h ResourceEventHandler, clientState Store, + transformer TransformFunc, ) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set @@ -393,24 +446,33 @@ func newInformer( Process: func(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { - switch d.Type { - case Sync, Replaced, Added, Updated: - if old, exists, err := clientState.Get(d.Object); err == nil && exists { - if err := clientState.Update(d.Object); err != nil { - return err - } - h.OnUpdate(old, d.Object) - } else { - if err := clientState.Add(d.Object); err != nil { - return err - } - h.OnAdd(d.Object) - } - case Deleted: - if err := clientState.Delete(d.Object); err != nil { + obj := d.Object + if transformer != nil { + var err error + obj, err = transformer(obj) + if err != nil { return err } - h.OnDelete(d.Object) + } + + switch d.Type { + case Sync, Replaced, Added, Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + h.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + h.OnAdd(obj) + } + case Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + h.OnDelete(obj) } } return nil diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 82038e0f9..2da2933ab 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -20,10 +20,12 @@ import ( "errors" "fmt" "sync" + "time" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" ) // DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are @@ -121,7 +123,7 @@ type DeltaFIFO struct { knownObjects KeyListerGetter // Used to indicate a queue is closed so a control loop can exit when a queue is empty. - // Currently, not used to gate any of CRED operations. + // Currently, not used to gate any of CRUD operations. 
closed bool // emitDeltaTypeReplaced is whether to emit the Replaced or Sync @@ -456,8 +458,8 @@ func (f *DeltaFIFO) listLocked() []interface{} { func (f *DeltaFIFO) ListKeys() []string { f.lock.RLock() defer f.lock.RUnlock() - list := make([]string, 0, len(f.items)) - for key := range f.items { + list := make([]string, 0, len(f.queue)) + for _, key := range f.queue { list = append(list, key) } return list @@ -526,6 +528,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } id := f.queue[0] f.queue = f.queue[1:] + depth := len(f.queue) if f.initialPopulationCount > 0 { f.initialPopulationCount-- } @@ -536,6 +539,18 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) + // Only log traces if the queue depth is greater than 10 and it takes more than + // 100 milliseconds to process one item from the queue. + // Queue depth never goes high because processing an item is locking the queue, + // and new items can't be added until processing finish. + // https://github.com/kubernetes/kubernetes/issues/103789 + if depth > 10 { + trace := utiltrace.New("DeltaFIFO Pop Process", + utiltrace.Field{Key: "ID", Value: id}, + utiltrace.Field{Key: "Depth", Value: depth}, + utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) + defer trace.LogIfLong(100 * time.Millisecond) + } err := process(item) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) @@ -557,7 +572,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // of the Deltas associated with K. Otherwise the pre-existing keys // are those listed by `f.knownObjects` and the current object of K is // what `f.knownObjects.GetByKey(K)` returns. -func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { +func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { f.lock.Lock() defer f.lock.Unlock() keys := make(sets.String, len(list)) diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index dfa956193..7abdae737 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) // ExpirationCache implements the store interface diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index 33afd32c8..a16f4735e 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -17,8 +17,8 @@ limitations under the License. package cache import ( - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/clock" ) type fakeThreadSafeMap struct { diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index f82bf22d2..5c9255027 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -127,7 +127,7 @@ type FIFO struct { // Indication the queue is closed. // Used to indicate a queue is closed so a control loop can exit when a queue is empty. - // Currently, not used to gate any of CRED operations. + // Currently, not used to gate any of CRUD operations. 
closed bool } diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index fa29e6a70..1d6aae560 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -78,7 +78,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( - // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. + // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) @@ -94,7 +94,7 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { // Index maps the indexed value to a set of keys in the store that match on that value type Index map[string]sets.String -// Indexers maps a name to a IndexFunc +// Indexers maps a name to an IndexFunc type Indexers map[string]IndexFunc // Indices maps a name to an Index diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 90fa45ddb..0708a6e8b 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -40,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" + "k8s.io/utils/clock" "k8s.io/utils/trace" ) @@ -69,7 +69,7 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager - // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch. + // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. 
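For reference, a minimal sketch using the stock NamespaceIndex / MetaNamespaceIndexFunc pair documented in the index.go hunk above; the pod names are illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index objects by namespace using the built-in index name and func.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "default"}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "kube-system"}})

	objs, _ := indexer.ByIndex(cache.NamespaceIndex, "default")
	fmt.Println(len(objs)) // 1
}
```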
initConnBackoffManager wait.BackoffManager resyncPeriod time.Duration @@ -319,7 +319,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { panic(r) case <-listCh: } + initTrace.Step("Objects listed", trace.Field{"error", err}) if err != nil { + klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err) } @@ -338,7 +340,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } r.setIsLastSyncResourceVersionUnavailable(false) // list was successful - initTrace.Step("Objects listed") listMetaInterface, err := meta.ListAccessor(list) if err != nil { return fmt.Errorf("unable to understand list result %#v: %v", list, err) diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 3a3f538a1..4b7fc04e3 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -23,10 +23,10 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/buffer" + "k8s.io/utils/clock" "k8s.io/klog/v2" ) @@ -368,6 +368,10 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) er func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() + if s.HasStarted() { + klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") + return + } fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: s.indexer, EmitDeltaTypeReplaced: true, @@ -410,6 +414,12 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { s.controller.Run(stopCh) } +func (s *sharedIndexInformer) HasStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + func (s *sharedIndexInformer) HasSynced() bool { s.startedLock.Lock() defer s.startedLock.Unlock() @@ -694,9 +704,9 @@ type processorListener struct { // full resync from the shared informer, but modified by two // adjustments. One is imposing a lower bound, // `minimumResyncPeriod`. The other is another lower bound, the - // sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only + // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only // in AddEventHandlerWithResyncPeriod invocations made after the - // sharedProcessor starts and (b) only if the informer does + // sharedIndexInformer starts and (b) only if the informer does // resyncs at all. 
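A rough sketch of the new Run-once guard on sharedIndexInformer added above: a second Run now logs a warning and returns instead of starting a duplicate controller over the same DeltaFIFO. The fake clientset and sleep are illustrative scaffolding:

```go
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
	informer := factory.Core().V1().Pods().Informer()

	stop := make(chan struct{})
	go informer.Run(stop)
	time.Sleep(10 * time.Millisecond) // let the first Run mark the informer started

	// Second Run is now rejected with a warning instead of corrupting state.
	informer.Run(stop)

	close(stop)
}
```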
requestedResyncPeriod time.Duration // resyncPeriod is the threshold that will be used in the logic diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 56251179b..ea34e9035 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -90,7 +90,7 @@ func (c *threadSafeMap) Delete(key string) { c.lock.Lock() defer c.lock.Unlock() if obj, exists := c.items[key]; exists { - c.deleteFromIndices(obj, key) + c.updateIndices(obj, nil, key) delete(c.items, key) } } @@ -251,61 +251,76 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { return nil } -// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj +// updateIndices modifies the objects location in the managed indexes: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj // updateIndices must be called from a function that already has a lock on the cache func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { - // if we got an old object, we need to remove it before we add it again - if oldObj != nil { - c.deleteFromIndices(oldObj, key) - } + var oldIndexValues, indexValues []string + var err error for name, indexFunc := range c.indexers { - indexValues, err := indexFunc(newObj) + if oldObj != nil { + oldIndexValues, err = indexFunc(oldObj) + } else { + oldIndexValues = oldIndexValues[:0] + } if err != nil { panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } + + if newObj != nil { + indexValues, err = indexFunc(newObj) + } else { + indexValues = indexValues[:0] + } + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + index := c.indices[name] if index == nil { index = Index{} c.indices[name] = index } - for _, indexValue := range indexValues { - set := index[indexValue] - if set == nil { - set = sets.String{} - index[indexValue] = set + for _, value := range oldIndexValues { + // We optimize for the most common case where index returns a single value. + if len(indexValues) == 1 && value == indexValues[0] { + continue } - set.Insert(key) + c.deleteKeyFromIndex(key, value, index) + } + for _, value := range indexValues { + // We optimize for the most common case where index returns a single value. 
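A minimal sketch of what the reworked updateIndices in this hunk handles in one pass: an Update moves a key from its old index value to the new one, and Delete (now routed through updateIndices with a nil newObj) removes it. The "app" label index is illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	byApp := func(obj interface{}) ([]string, error) {
		return []string{obj.(*v1.Pod).Labels["app"]}, nil
	}
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"app": byApp})

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "web-0", Namespace: "default", Labels: map[string]string{"app": "web"},
	}}
	_ = indexer.Add(pod)

	moved := pod.DeepCopy()
	moved.Labels["app"] = "api"
	_ = indexer.Update(moved) // old "web" entry removed, new "api" entry added in one pass

	web, _ := indexer.ByIndex("app", "web")
	api, _ := indexer.ByIndex("app", "api")
	fmt.Println(len(web), len(api)) // 0 1
}
```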
+ if len(oldIndexValues) == 1 && value == oldIndexValues[0] { + continue + } + c.addKeyToIndex(key, value, index) } } } -// deleteFromIndices removes the object from each of the managed indexes -// it is intended to be called from a function that already has a lock on the cache -func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { - for name, indexFunc := range c.indexers { - indexValues, err := indexFunc(obj) - if err != nil { - panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) - } +func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) +} - index := c.indices[name] - if index == nil { - continue - } - for _, indexValue := range indexValues { - set := index[indexValue] - if set != nil { - set.Delete(key) - - // If we don't delete the set when zero, indices with high cardinality - // short lived resources can cause memory to increase over time from - // unused empty sets. See `kubernetes/kubernetes/issues/84959`. - if len(set) == 0 { - delete(index, indexValue) - } - } - } +func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + return + } + set.Delete(key) + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, indexValue) } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 31716abf1..44a2fb94b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -124,7 +124,10 @@ type AuthInfo struct { // Impersonate is the username to act-as. // +optional Impersonate string `json:"act-as,omitempty"` - // ImpersonateGroups is the groups to imperonate. + // ImpersonateUID is the uid to impersonate. + // +optional + ImpersonateUID string `json:"act-as-uid,omitempty"` + // ImpersonateGroups is the groups to impersonate. // +optional ImpersonateGroups []string `json:"act-as-groups,omitempty"` // ImpersonateUserExtra contains additional information for impersonated user. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go index c38ebc076..6eee281bc 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -165,7 +165,7 @@ func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[str newExtension := (*in)[key] oldExtension := runtime.RawExtension{} if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { - return nil + return err } namedExtension := NamedExtension{key, oldExtension} *out = append(*out, namedExtension) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 7e8351032..757ed817b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -111,10 +111,13 @@ type AuthInfo struct { // TokenFile is a pointer to a file that contains a bearer token (as described above). 
If both Token and TokenFile are present, Token takes precedence. // +optional TokenFile string `json:"tokenFile,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. + // Impersonate is the username to impersonate. The name matches the flag. // +optional Impersonate string `json:"as,omitempty"` - // ImpersonateGroups is the groups to imperonate. + // ImpersonateUID is the uid to impersonate. + // +optional + ImpersonateUID string `json:"as-uid,omitempty"` + // ImpersonateGroups is the groups to impersonate. // +optional ImpersonateGroups []string `json:"as-groups,omitempty"` // ImpersonateUserExtra contains additional information for impersonated user. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index 13dfa903c..a13bae64d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -166,6 +167,7 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate + out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username @@ -200,6 +202,7 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s out.Token = in.Token out.TokenFile = in.TokenFile out.Impersonate = in.Impersonate + out.ImpersonateUID = in.ImpersonateUID out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) out.Username = in.Username diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go index da519dfa3..78492598b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go index aae469033..6a57decf6 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index a04de6260..86e4ddef1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 0a905490c..cc37c9fbf 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go 
@@ -181,6 +181,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { if len(configAuthInfo.Impersonate) > 0 { clientConfig.Impersonate = restclient.ImpersonationConfig{ UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, Groups: configAuthInfo.ImpersonateGroups, Extra: configAuthInfo.ImpersonateUserExtra, } @@ -255,6 +256,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, Groups: configAuthInfo.ImpersonateGroups, Extra: configAuthInfo.ImpersonateUserExtra, } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go index 95cba0fac..ff643cc13 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -53,6 +53,7 @@ type AuthOverrideFlags struct { ClientKey FlagInfo Token FlagInfo Impersonate FlagInfo + ImpersonateUID FlagInfo ImpersonateGroups FlagInfo Username FlagInfo Password FlagInfo @@ -154,6 +155,7 @@ const ( FlagEmbedCerts = "embed-certs" FlagBearerToken = "token" FlagImpersonate = "as" + FlagImpersonateUID = "as-uid" FlagImpersonateGroup = "as-group" FlagUsername = "username" FlagPassword = "password" @@ -179,6 +181,7 @@ func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, + ImpersonateUID: FlagInfo{prefix + FlagImpersonateUID, "", "", "UID to impersonate for the operation"}, ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."}, Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, @@ -219,6 +222,7 @@ func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, fl flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) + flagNames.ImpersonateUID.BindStringFlag(flags, &authInfo.ImpersonateUID).AddSecretAnnotation(flags) flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags) flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 8541a08b2..2ae1eb706 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -229,7 +229,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ } if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { if _, err := parseProxyURL(proxyURL); err != nil { - validationErrors = 
append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %v", proxyURL, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err)) } } // Make sure CA data and CA file aren't both specified @@ -239,7 +239,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) } else { defer clientCertCA.Close() } @@ -278,7 +278,7 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) } else { defer clientCertFile.Close() } @@ -286,7 +286,7 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) } else { defer clientKeyFile.Close() } @@ -323,9 +323,9 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } - // ImpersonateGroups or ImpersonateUserExtra should be requested with a user - if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { - validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) + // ImpersonateUID, ImpersonateGroups or ImpersonateUserExtra should be requested with a user + if (len(authInfo.ImpersonateUID) > 0 || len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { + validationErrors = append(validationErrors, fmt.Errorf("requesting uid, groups or user-extra for %v without impersonating a user", authInfoName)) } return validationErrors } diff --git a/vendor/k8s.io/client-go/tools/events/doc.go b/vendor/k8s.io/client-go/tools/events/doc.go index 795582b02..8c48f607e 100644 --- a/vendor/k8s.io/client-go/tools/events/doc.go +++ b/vendor/k8s.io/client-go/tools/events/doc.go @@ -15,5 +15,5 @@ limitations under the License. */ // Package events has all client logic for recording and reporting -// "k8s.io/api/events/v1beta1".Event events. +// "k8s.io/api/events/v1".Event events. 
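For reference, a minimal sketch of the UID impersonation plumbed through clientcmd above; this is the programmatic equivalent of `--as jane --as-uid 1234-5678 --as-group developers`, and the user name, UID, and group are illustrative values:

```go
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// The UID is sent as the Impersonate-Uid header on every request.
	cfg.Impersonate = rest.ImpersonationConfig{
		UserName: "jane",
		UID:      "1234-5678",
		Groups:   []string{"developers"},
	}

	if _, err := kubernetes.NewForConfig(cfg); err != nil {
		panic(err)
	}
}
```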
package events // import "k8s.io/client-go/tools/events" diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go index bde888ee9..3dd3cea1a 100644 --- a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -29,7 +29,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -43,6 +42,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record/util" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -290,6 +290,20 @@ func getKey(event *eventsv1.Event) eventKey { return key } +// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// The return value can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() { + return e.StartEventWatcher( + func(obj runtime.Object) { + event, ok := obj.(*eventsv1.Event) + if !ok { + klog.Errorf("unexpected type, expected eventsv1.Event") + return + } + klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) + }) +} + // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. // The return value is used to stop recording func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() { diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go index 2837cc160..132843742 100644 --- a/vendor/k8s.io/client-go/tools/events/event_recorder.go +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -24,12 +24,12 @@ import ( eventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/record/util" "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) type recorderImpl struct { @@ -48,9 +48,13 @@ func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.O klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) return } - refRelated, err := reference.GetReference(recorder.scheme, related) - if err != nil { - klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + + var refRelated *v1.ObjectReference + if related != nil { + refRelated, err = reference.GetReference(recorder.scheme, related) + if err != nil { + klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + } } if !util.ValidateEventType(eventtype) { klog.Errorf("Unsupported event type: '%v'", eventtype) diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go index f1a523caa..57bfa1ce8 100644 --- a/vendor/k8s.io/client-go/tools/events/interfaces.go +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -20,6 +20,7 @@ import ( eventsv1 "k8s.io/api/events/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ) // EventRecorder knows how to record events on behalf of an EventSource. @@ -56,6 +57,10 @@ type EventBroadcaster interface { // TODO: figure out if this can be removed. StartEventWatcher(eventHandler func(event runtime.Object)) func() + // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured + // logging function. The return value can be ignored or used to stop recording, if desired. + StartStructuredLogging(verbosity klog.Level) func() + // Shutdown shuts down the broadcaster Shutdown() } diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 30a666019..4b61d0052 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -25,13 +25,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const maxTriesPerEvent = 12 @@ -81,7 +81,10 @@ type CorrelatorOptions struct { MaxIntervalInSeconds int // The clock used by the EventAggregator to allow for testing // If not specified (zero value), clock.RealClock{} will be used - Clock clock.Clock + Clock clock.PassiveClock + // The func used by EventFilterFunc, which returns a key for given event, based on which filtering will take place + // If not specified (zero value), getSpamKey will be used + SpamKeyFunc EventSpamKeyFunc } // EventRecorder knows how to record events on behalf of an EventSource. 
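A minimal sketch of the new SpamKeyFunc hook in CorrelatorOptions above: rate-limit events per (source, object, reason) instead of the default per (source, object) key. The key format is illustrative:

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	broadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{
		// Fold the reason into the spam-filter key so distinct reasons for the
		// same object are rate-limited independently.
		SpamKeyFunc: func(event *v1.Event) string {
			return event.Source.Component + "/" + event.InvolvedObject.Namespace + "/" +
				event.InvolvedObject.Name + "/" + event.Reason
		},
	})
	defer broadcaster.Shutdown()
}
```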
@@ -320,7 +323,7 @@ type recorderImpl struct { scheme *runtime.Scheme source v1.EventSource *watch.Broadcaster - clock clock.Clock + clock clock.PassiveClock } func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) { diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go index 329c7ce57..4f041e8fd 100644 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -27,10 +27,10 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/util/flowcontrol" + "k8s.io/utils/clock" ) const ( @@ -81,6 +81,9 @@ func getSpamKey(event *v1.Event) string { "") } +// EventSpamKeyFunc is a function that returns unique key based on provided event +type EventSpamKeyFunc func(event *v1.Event) string + // EventFilterFunc is a function that returns true if the event should be skipped type EventFilterFunc func(event *v1.Event) bool @@ -99,31 +102,35 @@ type EventSourceObjectSpamFilter struct { qps float32 // clock is used to allow for testing over a time interval - clock clock.Clock + clock clock.PassiveClock + + // spamKeyFunc is a func used to create a key based on an event, which is later used to filter spam events. + spamKeyFunc EventSpamKeyFunc } // NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. -func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.PassiveClock, spamKeyFunc EventSpamKeyFunc) *EventSourceObjectSpamFilter { return &EventSourceObjectSpamFilter{ - cache: lru.New(lruCacheSize), - burst: burst, - qps: qps, - clock: clock, + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + spamKeyFunc: spamKeyFunc, } } // spamRecord holds data used to perform spam filtering decisions. type spamRecord struct { // rateLimiter controls the rate of events about this object - rateLimiter flowcontrol.RateLimiter + rateLimiter flowcontrol.PassiveRateLimiter } // Filter controls that a given source+object are not exceeding the allowed rate. func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { var record spamRecord - // controls our cached information about this event (source+object) - eventKey := getSpamKey(event) + // controls our cached information about this event + eventKey := f.spamKeyFunc(event) // do we have a record of similar events in our cache? 
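A minimal sketch of the StartStructuredLogging method added to the events/v1 broadcaster above; the component name and verbosity are illustrative:

```go
package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/events"
)

func newRecorder(cfg *rest.Config) (events.EventRecorder, func(), error) {
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}
	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})

	// Mirror every recorded event into structured klog output at verbosity 3,
	// in addition to writing it to the events/v1 API.
	stopLogging := broadcaster.StartStructuredLogging(3)
	broadcaster.StartRecordingToSink(make(chan struct{}))

	return broadcaster.NewRecorder(scheme.Scheme, "my-controller"), stopLogging, nil
}
```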
f.Lock() @@ -135,7 +142,7 @@ func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { // verify we have a rate limiter for this record if record.rateLimiter == nil { - record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) + record.rateLimiter = flowcontrol.NewTokenBucketPassiveRateLimiterWithClock(f.qps, f.burst, f.clock) } // ensure we have available rate @@ -200,12 +207,12 @@ type EventAggregator struct { maxIntervalInSeconds uint // clock is used to allow for testing over a time interval - clock clock.Clock + clock clock.PassiveClock } // NewEventAggregator returns a new instance of an EventAggregator func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, - maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { + maxEvents int, maxIntervalInSeconds int, clock clock.PassiveClock) *EventAggregator { return &EventAggregator{ cache: lru.New(lruCacheSize), keyFunc: keyFunc, @@ -308,11 +315,11 @@ type eventLog struct { type eventLogger struct { sync.RWMutex cache *lru.Cache - clock clock.Clock + clock clock.PassiveClock } // newEventLogger observes events and counts their frequencies -func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { +func newEventLogger(lruCacheEntries int, clock clock.PassiveClock) *eventLogger { return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} } @@ -429,9 +436,9 @@ type EventCorrelateResult struct { // times. // * A source may burst 25 events about an object, but has a refill rate budget // per object of 1 event every 5 minutes to control long-tail of spam. -func NewEventCorrelator(clock clock.Clock) *EventCorrelator { +func NewEventCorrelator(clock clock.PassiveClock) *EventCorrelator { cacheSize := maxLruCacheEntries - spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock) + spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock, getSpamKey) return &EventCorrelator{ filterFunc: spamFilter.Filter, aggregator: NewEventAggregator( @@ -448,8 +455,12 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator { func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator { optionsWithDefaults := populateDefaults(options) - spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize, - optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock) + spamFilter := NewEventSourceObjectSpamFilter( + optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.BurstSize, + optionsWithDefaults.QPS, + optionsWithDefaults.Clock, + optionsWithDefaults.SpamKeyFunc) return &EventCorrelator{ filterFunc: spamFilter.Filter, aggregator: NewEventAggregator( @@ -489,6 +500,9 @@ func populateDefaults(options CorrelatorOptions) CorrelatorOptions { if options.Clock == nil { options.Clock = clock.RealClock{} } + if options.SpamKeyFunc == nil { + options.SpamKeyFunc = getSpamKey + } return options } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 070474831..89de798f6 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -82,6 +82,8 @@ type Config struct { type ImpersonationConfig struct { // UserName matches user.Info.GetName() UserName string + // UID matches user.Info.GetUID() + UID string // Groups matches user.Info.GetGroups() Groups []string // Extra matches 
user.Info.GetExtra() diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 818fd52d6..4c74606a9 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -17,9 +17,12 @@ limitations under the License. package transport import ( + "crypto/tls" "fmt" "net/http" + "net/http/httptrace" "strings" + "sync" "time" "golang.org/x/oauth2" @@ -57,6 +60,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip rt = NewUserAgentRoundTripper(config.UserAgent, rt) } if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.UID) > 0 || len(config.Impersonate.Groups) > 0 || len(config.Impersonate.Extra) > 0 { rt = NewImpersonatingRoundTripper(config.Impersonate, rt) @@ -68,7 +72,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { case bool(klog.V(9).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugResponseHeaders) + rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugDetailedTiming, DebugResponseHeaders) case bool(klog.V(8).Enabled()): rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders) case bool(klog.V(7).Enabled()): @@ -88,6 +92,8 @@ type authProxyRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{} + // NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for // authentication terminating proxy cases // assuming you pull the user from the context: @@ -146,6 +152,8 @@ type userAgentRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &userAgentRoundTripper{} + // NewUserAgentRoundTripper will add User-Agent header to a request unless it has already been set. func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { return &userAgentRoundTripper{agent, rt} @@ -172,6 +180,8 @@ type basicAuthRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &basicAuthRoundTripper{} + // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a // request unless it has already been set. func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { @@ -199,6 +209,9 @@ const ( // ImpersonateUserHeader is used to impersonate a particular user during an API server request ImpersonateUserHeader = "Impersonate-User" + // ImpersonateUIDHeader is used to impersonate a particular UID during an API server request + ImpersonateUIDHeader = "Impersonate-Uid" + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. // It can be repeated multiplied times for multiple groups. ImpersonateGroupHeader = "Impersonate-Group" @@ -218,6 +231,8 @@ type impersonatingRoundTripper struct { delegate http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &impersonatingRoundTripper{} + // NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. 
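For reference, a minimal sketch of the new DebugDetailedTiming level (wired into DebugWrappers at -v=9 above, implemented just below): it attaches an httptrace.ClientTrace to each request and logs DNS, dial, TLS-handshake, and server-processing durations. The target URL is illustrative:

```go
package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// Wrap the default transport so each request logs per-phase timings via klog.
	rt := transport.NewDebuggingRoundTripper(http.DefaultTransport,
		transport.DebugURLTiming, transport.DebugDetailedTiming)

	client := &http.Client{Transport: rt}
	if resp, err := client.Get("https://example.com/healthz"); err == nil {
		resp.Body.Close()
	}
}
```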
func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { return &impersonatingRoundTripper{impersonate, delegate} @@ -230,7 +245,9 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } req = utilnet.CloneRequest(req) req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) - + if rt.impersonate.UID != "" { + req.Header.Set(ImpersonateUIDHeader, rt.impersonate.UID) + } for _, group := range rt.impersonate.Groups { req.Header.Add(ImpersonateGroupHeader, group) } @@ -255,6 +272,8 @@ type bearerAuthRoundTripper struct { rt http.RoundTripper } +var _ utilnet.RoundTripperWrapper = &bearerAuthRoundTripper{} + // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { @@ -314,6 +333,14 @@ type requestInfo struct { ResponseHeaders http.Header ResponseErr error + muTrace sync.Mutex // Protect trace fields + DNSLookup time.Duration + Dialing time.Duration + GetConnection time.Duration + TLSHandshake time.Duration + ServerProcessing time.Duration + ConnectionReused bool + Duration time.Duration } @@ -356,6 +383,8 @@ type debuggingRoundTripper struct { levels map[DebugLevel]bool } +var _ utilnet.RoundTripperWrapper = &debuggingRoundTripper{} + // DebugLevel is used to enable debugging of certain // HTTP requests and responses fields via the debuggingRoundTripper. type DebugLevel int @@ -374,6 +403,8 @@ const ( DebugResponseStatus // DebugResponseHeaders will add to the debug output the HTTP response headers. DebugResponseHeaders + // DebugDetailedTiming will add to the debug output the duration of the HTTP requests events. 
+ DebugDetailedTiming ) // NewDebuggingRoundTripper allows to display in the logs output debug information @@ -445,6 +476,74 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } startTime := time.Now() + + if rt.levels[DebugDetailedTiming] { + var getConn, dnsStart, dialStart, tlsStart, serverStart time.Time + var host string + trace := &httptrace.ClientTrace{ + // DNS + DNSStart: func(info httptrace.DNSStartInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + dnsStart = time.Now() + host = info.Host + }, + DNSDone: func(info httptrace.DNSDoneInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.DNSLookup = time.Now().Sub(dnsStart) + klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs) + }, + // Dial + ConnectStart: func(network, addr string) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + dialStart = time.Now() + }, + ConnectDone: func(network, addr string, err error) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.Dialing = time.Now().Sub(dialStart) + if err != nil { + klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err) + } else { + klog.Infof("HTTP Trace: Dial to %s:%s succeed", network, addr) + } + }, + // TLS + TLSHandshakeStart: func() { + tlsStart = time.Now() + }, + TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.TLSHandshake = time.Now().Sub(tlsStart) + }, + // Connection (it can be DNS + Dial or just the time to get one from the connection pool) + GetConn: func(hostPort string) { + getConn = time.Now() + }, + GotConn: func(info httptrace.GotConnInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.GetConnection = time.Now().Sub(getConn) + reqInfo.ConnectionReused = info.Reused + }, + // Server Processing (time since we wrote the request until first byte is received) + WroteRequest: func(info httptrace.WroteRequestInfo) { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + serverStart = time.Now() + }, + GotFirstResponseByte: func() { + reqInfo.muTrace.Lock() + defer reqInfo.muTrace.Unlock() + reqInfo.ServerProcessing = time.Now().Sub(serverStart) + }, + } + req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) + } + response, err := rt.delegatedRoundTripper.RoundTrip(req) reqInfo.Duration = time.Since(startTime) @@ -453,6 +552,24 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e if rt.levels[DebugURLTiming] { klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } + if rt.levels[DebugDetailedTiming] { + stats := "" + if !reqInfo.ConnectionReused { + stats += fmt.Sprintf(`DNSLookup %d ms Dial %d ms TLSHandshake %d ms`, + reqInfo.DNSLookup.Nanoseconds()/int64(time.Millisecond), + reqInfo.Dialing.Nanoseconds()/int64(time.Millisecond), + reqInfo.TLSHandshake.Nanoseconds()/int64(time.Millisecond), + ) + } else { + stats += fmt.Sprintf(`GetConnection %d ms`, reqInfo.GetConnection.Nanoseconds()/int64(time.Millisecond)) + } + if reqInfo.ServerProcessing != 0 { + stats += fmt.Sprintf(` ServerProcessing %d ms`, reqInfo.ServerProcessing.Nanoseconds()/int64(time.Millisecond)) + } + stats += fmt.Sprintf(` Duration %d ms`, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("HTTP Statistics: %s", stats) + } + if rt.levels[DebugResponseStatus] { klog.Infof("Response 
Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index fea02e611..68a0a704f 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -26,6 +26,7 @@ import ( "golang.org/x/oauth2" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/klog/v2" ) @@ -95,6 +96,8 @@ type tokenSourceTransport struct { src ResettableTokenSource } +var _ utilnet.RoundTripperWrapper = &tokenSourceTransport{} + func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // This is to allow --token to override other bearer token providers. if req.Header.Get("Authorization") != "" { @@ -119,6 +122,8 @@ func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { tryCancelRequest(tst.ort, req) } +func (tst *tokenSourceTransport) WrappedRoundTripper() http.RoundTripper { return tst.base } + type fileTokenSource struct { path string period time.Duration diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index bffb15262..75143ec07 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -33,6 +33,7 @@ import ( "time" "k8s.io/client-go/util/keyutil" + netutils "k8s.io/utils/net" ) const duration365d = time.Hour * 24 * 365 @@ -157,7 +158,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a BasicConstraintsValid: true, } - if ip := net.ParseIP(host); ip != nil { + if ip := netutils.ParseIPSloppy(host); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { template.DNSNames = append(template.DNSNames, host) diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index c48ba03e8..3ef88dbdb 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -17,10 +17,12 @@ limitations under the License. package flowcontrol import ( + "math/rand" "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" + testingclock "k8s.io/utils/clock/testing" "k8s.io/utils/integer" ) @@ -35,23 +37,43 @@ type Backoff struct { defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry + rand *rand.Rand + + // maxJitterFactor adds jitter to the exponentially backed off delay. + // if maxJitterFactor is zero, no jitter is added to the delay in + // order to maintain current behavior. 
+ maxJitterFactor float64 } -func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: tc, - defaultDuration: initial, - maxDuration: max, - } +func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff { + return newBackoff(tc, initial, max, 0.0) } func NewBackOff(initial, max time.Duration) *Backoff { + return NewBackOffWithJitter(initial, max, 0.0) +} + +func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff { + return newBackoff(tc, initial, max, maxJitterFactor) +} + +func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff { + clock := clock.RealClock{} + return newBackoff(clock, initial, max, maxJitterFactor) +} + +func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff { + var random *rand.Rand + if maxJitterFactor > 0 { + random = rand.New(rand.NewSource(clock.Now().UnixNano())) + } return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, - Clock: clock.RealClock{}, + Clock: clock, defaultDuration: initial, maxDuration: max, + maxJitterFactor: maxJitterFactor, + rand: random, } } @@ -74,8 +96,10 @@ func (p *Backoff) Next(id string, eventTime time.Time) { entry, ok := p.perItemBackoff[id] if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { entry = p.initEntryUnsafe(id) + entry.backoff += p.jitter(entry.backoff) } else { - delay := entry.backoff * 2 // exponential + delay := entry.backoff * 2 // exponential + delay += p.jitter(entry.backoff) // add some jitter to the delay entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) } entry.lastUpdate = p.Clock.Now() @@ -143,6 +167,14 @@ func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { return entry } +func (p *Backoff) jitter(delay time.Duration) time.Duration { + if p.rand == nil { + return 0 + } + + return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay)) +} + // After 2*maxDuration we restart the backoff factor to the beginning func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index ffd912c56..af7abd898 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -23,26 +23,36 @@ import ( "time" "golang.org/x/time/rate" + "k8s.io/utils/clock" ) -type RateLimiter interface { +type PassiveRateLimiter interface { // TryAccept returns true if a token is taken immediately. Otherwise, // it returns false. TryAccept() bool - // Accept returns once a token becomes available. - Accept() // Stop stops the rate limiter, subsequent calls to CanAccept will return false Stop() // QPS returns QPS of this rate limiter QPS() float32 +} + +type RateLimiter interface { + PassiveRateLimiter + // Accept returns once a token becomes available. + Accept() // Wait returns nil if a token is taken before the Context is done. 
Wait(ctx context.Context) error } -type tokenBucketRateLimiter struct { +type tokenBucketPassiveRateLimiter struct { limiter *rate.Limiter - clock Clock qps float32 + clock clock.PassiveClock +} + +type tokenBucketRateLimiter struct { + tokenBucketPassiveRateLimiter + clock Clock } // NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. @@ -52,58 +62,73 @@ type tokenBucketRateLimiter struct { // The maximum number of tokens in the bucket is capped at 'burst'. func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { limiter := rate.NewLimiter(rate.Limit(qps), burst) - return newTokenBucketRateLimiter(limiter, realClock{}, qps) + return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps) +} + +// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns +// a PassiveRateLimiter which does not have Accept() and Wait() methods. +func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps) } // An injectable, mockable clock interface. type Clock interface { - Now() time.Time + clock.PassiveClock Sleep(time.Duration) } -type realClock struct{} - -func (realClock) Now() time.Time { - return time.Now() -} -func (realClock) Sleep(d time.Duration) { - time.Sleep(d) -} +var _ Clock = (*clock.RealClock)(nil) // NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter // but allows an injectable clock, for testing. func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter { limiter := rate.NewLimiter(rate.Limit(qps), burst) - return newTokenBucketRateLimiter(limiter, c, qps) + return newTokenBucketRateLimiterWithClock(limiter, c, qps) } -func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter { +// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock +// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods +// and uses a PassiveClock. 
+func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps) +} + +func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter { return &tokenBucketRateLimiter{ - limiter: limiter, - clock: c, - qps: qps, + tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps), + clock: c, } } -func (t *tokenBucketRateLimiter) TryAccept() bool { - return t.limiter.AllowN(t.clock.Now(), 1) +func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter { + return &tokenBucketPassiveRateLimiter{ + limiter: limiter, + qps: qps, + clock: c, + } +} + +func (tbprl *tokenBucketPassiveRateLimiter) Stop() { +} + +func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 { + return tbprl.qps +} + +func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool { + return tbprl.limiter.AllowN(tbprl.clock.Now(), 1) } // Accept will block until a token becomes available -func (t *tokenBucketRateLimiter) Accept() { - now := t.clock.Now() - t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now)) +func (tbrl *tokenBucketRateLimiter) Accept() { + now := tbrl.clock.Now() + tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now)) } -func (t *tokenBucketRateLimiter) Stop() { -} - -func (t *tokenBucketRateLimiter) QPS() float32 { - return t.qps -} - -func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { - return t.limiter.Wait(ctx) +func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return tbrl.limiter.Wait(ctx) } type fakeAlwaysRateLimiter struct{} @@ -157,3 +182,11 @@ func (t *fakeNeverRateLimiter) QPS() float32 { func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { return errors.New("can not be accept") } + +var ( + _ RateLimiter = (*tokenBucketRateLimiter)(nil) + _ RateLimiter = (*fakeAlwaysRateLimiter)(nil) + _ RateLimiter = (*fakeNeverRateLimiter)(nil) +) + +var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil) diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go index 0b1dc338b..49ecd1465 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go +++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go @@ -132,9 +132,6 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { } continue } - if len(results) == 0 { - break - } fullResult = append(fullResult, results) } return fullResult, nil diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index 71bb6322e..efda7c197 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -27,7 +27,7 @@ import ( type RateLimiter interface { // When gets an item and gets to decide how long that item should wait When(item interface{}) time.Duration - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // Forget indicates that an item is finished being retried. 
Doesn't matter whether it's for failing // or for success, we'll stop tracking it Forget(item interface{}) // NumRequeues returns back how many failures the item has had @@ -209,3 +209,30 @@ func (r *MaxOfRateLimiter) Forget(item interface{}) { limiter.Forget(item) } } + +// WithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type WithMaxWaitRateLimiter struct { + limiter RateLimiter + maxDelay time.Duration +} + +func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { + return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} +} + +func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { + delay := w.limiter.When(item) + if delay > w.maxDelay { + return w.maxDelay + } + + return delay +} + +func (w WithMaxWaitRateLimiter) Forget(item interface{}) { + w.limiter.Forget(item) +} + +func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { + return w.limiter.NumRequeues(item) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 31d9182de..61c4da530 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -21,8 +21,8 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" ) // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to @@ -51,11 +51,11 @@ func NewNamedDelayingQueue(name string) DelayingInterface { // NewDelayingQueueWithCustomClock constructs a new named workqueue // with ability to inject real or fake clock for testing purposes -func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { +func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { return newDelayingQueue(clock, NewNamed(name), name) } -func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { ret := &delayingType{ Interface: q, clock: clock, diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 556e6432e..4b0a69616 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) // This file provides abstractions for setting the provider (e.g., prometheus) diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index f7c14ddcd..6f7063269 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" ) type Interface interface { @@ -29,6 +29,7 @@ type Interface interface { Get() (item interface{}, shutdown bool) Done(item interface{}) ShutDown() + ShutDownWithDrain() ShuttingDown() bool } @@ -46,7 +47,7 @@ func NewNamed(name string) *Type { ) } -func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { +func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { t := &Type{ clock: c, dirty: set{}, @@ -86,11 +87,12 @@ type Type struct { cond *sync.Cond shuttingDown bool + drain bool 
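A minimal sketch of the WithMaxWaitRateLimiter added above: it caps the per-item delay of any wrapped RateLimiter, here the default controller limiter, at a fixed ceiling. The 10s cap and item key are illustrative:

```go
package main

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Exponential per-item backoff from the default limiter, but never wait
	// longer than 10s between retries of the same key.
	limiter := workqueue.NewWithMaxWaitRateLimiter(
		workqueue.DefaultControllerRateLimiter(), 10*time.Second)

	queue := workqueue.NewRateLimitingQueue(limiter)
	defer queue.ShutDown()

	queue.AddRateLimited("object-key")
}
```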
metrics queueMetrics unfinishedWorkUpdatePeriod time.Duration - clock clock.Clock + clock clock.WithTicker } type empty struct{} @@ -110,6 +112,10 @@ func (s set) delete(item t) { delete(s, item) } +func (s set) len() int { + return len(s) +} + // Add marks item as needing processing. func (q *Type) Add(item interface{}) { q.cond.L.Lock() @@ -155,7 +161,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) { return nil, true } - item, q.queue = q.queue[0], q.queue[1:] + item = q.queue[0] + // The underlying array still exists and reference this object, so the object will not be garbage collected. + q.queue[0] = nil + q.queue = q.queue[1:] q.metrics.get(item) @@ -178,13 +187,71 @@ func (q *Type) Done(item interface{}) { if q.dirty.has(item) { q.queue = append(q.queue, item) q.cond.Signal() + } else if q.processing.len() == 0 { + q.cond.Signal() } } -// ShutDown will cause q to ignore all new items added to it. As soon as the -// worker goroutines have drained the existing items in the queue, they will be -// instructed to exit. +// ShutDown will cause q to ignore all new items added to it and +// immediately instruct the worker goroutines to exit. func (q *Type) ShutDown() { + q.setDrain(false) + q.shutdown() +} + +// ShutDownWithDrain will cause q to ignore all new items added to it. As soon +// as the worker goroutines have "drained", i.e: finished processing and called +// Done on all existing items in the queue; they will be instructed to exit and +// ShutDownWithDrain will return. Hence: a strict requirement for using this is; +// your workers must ensure that Done is called on all items in the queue once +// the shut down has been initiated, if that is not the case: this will block +// indefinitely. It is, however, safe to call ShutDown after having called +// ShutDownWithDrain, as to force the queue shut down to terminate immediately +// without waiting for the drainage. +func (q *Type) ShutDownWithDrain() { + q.setDrain(true) + q.shutdown() + for q.isProcessing() && q.shouldDrain() { + q.waitForProcessing() + } +} + +// isProcessing indicates if there are still items on the work queue being +// processed. It's used to drain the work queue on an eventual shutdown. +func (q *Type) isProcessing() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.processing.len() != 0 +} + +// waitForProcessing waits for the worker goroutines to finish processing items +// and call Done on them. +func (q *Type) waitForProcessing() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + // Ensure that we do not wait on a queue which is already empty, as that + // could result in waiting for Done to be called on items in an empty queue + // which has already been shut down, which will result in waiting + // indefinitely. 
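A minimal sketch of ShutDownWithDrain as described above: it returns only after every item handed out by Get has been marked Done, so workers must keep calling Done once shutdown starts. The sleeps are illustrative scaffolding:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("item-1")

	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			time.Sleep(200 * time.Millisecond) // simulate work
			fmt.Println("processed", item)
			q.Done(item) // required, or ShutDownWithDrain blocks indefinitely
		}
	}()

	time.Sleep(50 * time.Millisecond) // let the worker pick the item up
	q.ShutDownWithDrain()             // returns only after Done("item-1")
}
```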
+ if q.processing.len() == 0 { + return + } + q.cond.Wait() +} + +func (q *Type) setDrain(shouldDrain bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.drain = shouldDrain +} + +func (q *Type) shouldDrain() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.drain +} + +func (q *Type) shutdown() { q.cond.L.Lock() defer q.cond.L.Unlock() q.shuttingDown = true diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index 884abbde9..ec40fb4ed 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -144,7 +144,9 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. "NewRootGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetAction"}), "NewGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetAction"}), "NewRootDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteAction"}), + "NewRootDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteActionWithOptions"}), "NewDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteAction"}), + "NewDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteActionWithOptions"}), "NewRootDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionAction"}), "NewDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionAction"}), "NewRootUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateAction"}), @@ -412,8 +414,8 @@ var deleteTemplate = ` // Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. func (c *Fake$.type|publicPlural$) Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error { _, err := c.Fake. 
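// Editor's sketch (not part of the vendored patch): ShutDownWithDrain, added above,
// blocks until every item handed out by Get has been marked Done, so workers must
// keep calling Done after shutdown starts. The worker loop below is an assumed,
// minimal pattern for illustration.
package example

import "k8s.io/client-go/util/workqueue"

// runWorkers starts n goroutines that process items until the queue shuts down.
func runWorkers(q workqueue.Interface, n int, process func(interface{})) {
	for i := 0; i < n; i++ {
		go func() {
			for {
				item, shutdown := q.Get()
				if shutdown {
					return
				}
				process(item)
				// Done must be called even during shutdown, otherwise
				// ShutDownWithDrain waits forever for this item.
				q.Done(item)
			}
		}()
	}
}

// stop drains in-flight items before returning; ShutDown() would return immediately.
func stop(q workqueue.Interface) {
	q.ShutDownWithDrain()
}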
- $if .namespaced$Invokes($.NewDeleteAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.type|raw${}) - $else$Invokes($.NewRootDeleteAction|raw$($.type|allLowercasePlural$Resource, name), &$.type|raw${})$end$ + $if .namespaced$Invokes($.NewDeleteActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, opts), &$.type|raw${}) + $else$Invokes($.NewRootDeleteActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, opts), &$.type|raw${})$end$ return err } ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 20e8796bf..b0212296c 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -77,9 +77,10 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr "Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), "DefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), "DiscoveryInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryInterface"}), "DiscoveryClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryClient"}), - "NewDiscoveryClientForConfig": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfig"}), + "NewDiscoveryClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigAndClient"}), "NewDiscoveryClientForConfigOrDie": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigOrDie"}), "NewDiscoveryClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClient"}), "flowcontrolNewTokenBucketRateLimiter": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/flowcontrol", Name: "NewTokenBucketRateLimiter"}), @@ -91,6 +92,7 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } sw.Do(getDiscoveryTemplate, m) sw.Do(newClientsetForConfigTemplate, m) + sw.Do(newClientsetForConfigAndClientTemplate, m) sw.Do(newClientsetForConfigOrDieTemplate, m) sw.Do(newClientsetForRESTClientTemplate, m) @@ -136,22 +138,43 @@ var newClientsetForConfigTemplate = ` // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *$.Config|raw$) (*Clientset, error) { configShallowCopy := *c + + // share the transport between all clients + httpClient, err := $.RESTHTTPClientFor|raw$(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} +` + +var newClientsetForConfigAndClientTemplate = ` +// NewForConfigAndClient creates a new Clientset for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *$.Config|raw$, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = $.flowcontrolNewTokenBucketRateLimiter|raw$(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error -$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$, err =$.PackageAlias$.NewForConfig(&configShallowCopy) +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$, err =$.PackageAlias$.NewForConfigAndClient(&configShallowCopy, httpClient) if err!=nil { return nil, err } $end$ - cs.DiscoveryClient, err = $.NewDiscoveryClientForConfig|raw$(&configShallowCopy) + cs.DiscoveryClient, err = $.NewDiscoveryClientForConfigAndClient|raw$(&configShallowCopy, httpClient) if err!=nil { return nil, err } @@ -163,11 +186,11 @@ var newClientsetForConfigOrDieTemplate = ` // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *$.Config|raw$) *Clientset { - var cs Clientset -$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.NewForConfigOrDie(c) -$end$ - cs.DiscoveryClient = $.NewDiscoveryClientForConfigOrDie|raw$(c) - return &cs + cs, err := NewForConfig(c) + if err!=nil { + panic(err) + } + return cs } ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 215a0171c..302849900 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -89,20 +89,22 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer } m := map[string]interface{}{ - "group": g.group, - "version": g.version, - "groupName": groupName, - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "types": g.types, - "apiPath": apiPath(g.group), - "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), - "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), - "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), - "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), - "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), - "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), - "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}), + "group": g.group, + "version": g.version, + "groupName": groupName, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "types": g.types, + "apiPath": apiPath(g.group), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: 
"k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), + "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), + "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), + "restRESTClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientForConfigAndClient"}), + "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}), } sw.Do(groupInterfaceTemplate, m) sw.Do(groupClientTemplate, m) @@ -123,6 +125,7 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer } } sw.Do(newClientForConfigTemplate, m) + sw.Do(newClientForConfigAndClientTemplate, m) sw.Do(newClientForConfigOrDieTemplate, m) sw.Do(newClientForRESTClientTemplate, m) if g.version == "" { @@ -164,12 +167,30 @@ func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$() $.type|public$In var newClientForConfigTemplate = ` // NewForConfig creates a new $.GroupGoName$$.Version$Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *$.restConfig|raw$) (*$.GroupGoName$$.Version$Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := $.restRESTClientFor|raw$(&config) + httpClient, err := $.RESTHTTPClientFor|raw$(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} +` + +var newClientForConfigAndClientTemplate = ` +// NewForConfigAndClient creates a new $.GroupGoName$$.Version$Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *$.restConfig|raw$, h *http.Client) (*$.GroupGoName$$.Version$Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := $.restRESTClientForConfigAndClient|raw$(&config, h) if err != nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go index cc0f72a69..bdb554613 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -19,10 +19,8 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" - "k8s.io/gengo/args" "k8s.io/klog/v2" generatorargs "k8s.io/code-generator/cmd/client-gen/args" @@ -36,7 +34,7 @@ func main() { // Override defaults. 
// TODO: move this out of client-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" genericArgs.AddFlags(pflag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index 6e0f31f06..feba7d029 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -87,10 +87,8 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" - "k8s.io/gengo/args" "k8s.io/klog/v2" generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" @@ -104,7 +102,7 @@ func main() { // Override defaults. // TODO: move this out of conversion-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 31a4824a5..888c2e306 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -44,10 +44,8 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" - "k8s.io/gengo/args" "k8s.io/gengo/examples/deepcopy-gen/generators" "k8s.io/klog/v2" @@ -61,7 +59,7 @@ func main() { // Override defaults. // TODO: move this out of deepcopy-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go index e9aef8d22..f050a1327 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -43,10 +43,8 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" - "k8s.io/gengo/args" "k8s.io/gengo/examples/defaulter-gen/generators" "k8s.io/klog/v2" @@ -60,7 +58,7 @@ func main() { // Override defaults. 
// TODO: move this out of defaulter-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.AddFlags(pflag.CommandLine) customArgs.AddFlags(pflag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index f7c8fb7ff..8a92082be 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -57,7 +57,7 @@ func New() *Generator { sourceTree := args.DefaultSourceTree() common := args.GeneratorArgs{ OutputBase: sourceTree, - GoHeaderFilePath: filepath.Join(sourceTree, util.BoilerplatePath()), + GoHeaderFilePath: util.BoilerplatePath(), } defaultProtoImport := filepath.Join(sourceTree, "k8s.io", "kubernetes", "vendor", "github.com", "gogo", "protobuf", "protobuf") cwd, err := os.Getwd() diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index 6eff86bf7..54df66e45 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -573,7 +573,7 @@ func protobufTagToField(tag string, field *protoField, m types.Member, t *types. switch parts[0] { case "varint", "fixed32", "fixed64", "bytes", "group": default: - name := types.Name{} + var name types.Name if last := strings.LastIndex(parts[0], "."); last != -1 { prefix := parts[0][:last] name = types.Name{ diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/main.go b/vendor/k8s.io/code-generator/cmd/import-boss/main.go index 3fd0cf80d..0fc56445e 100644 --- a/vendor/k8s.io/code-generator/cmd/import-boss/main.go +++ b/vendor/k8s.io/code-generator/cmd/import-boss/main.go @@ -19,7 +19,6 @@ package main import ( "os" - "path/filepath" "github.com/spf13/pflag" "k8s.io/code-generator/pkg/util" @@ -34,7 +33,7 @@ func main() { arguments := args.Default() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.GoHeaderFilePath = util.BoilerplatePath() pflag.CommandLine.BoolVar(&arguments.IncludeTestFiles, "include-test-files", false, "If true, include *_test.go files.") if err := arguments.Execute( diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go index 6148296a0..650a3d78b 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -18,12 +18,10 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/informer-gen/generators" "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" "k8s.io/klog/v2" generatorargs "k8s.io/code-generator/cmd/informer-gen/args" @@ -35,7 +33,7 @@ func main() { // Override defaults. 
// TODO: move out of informer-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go index 855b4e15c..3418005c6 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -18,12 +18,10 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" "k8s.io/klog/v2" generatorargs "k8s.io/code-generator/cmd/lister-gen/args" @@ -35,7 +33,7 @@ func main() { // Override defaults. // TODO: move this out of lister-gen - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" genericArgs.AddFlags(pflag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go index 037adca2f..242eb3aa1 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -82,7 +82,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // if there is a comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", // extract the fully qualified API group name from it and overwrite the group inferred from the package path - if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { + if override := types.ExtractCommentTags("+", pkg.Comments)["groupName"]; override != nil { groupName := override[0] klog.V(5).Infof("overriding the group name with = %s", groupName) gv.Group = clientgentypes.Group(groupName) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go index 816bd8167..b8ba66f0d 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -18,7 +18,6 @@ package main import ( "flag" - "path/filepath" "github.com/spf13/pflag" "k8s.io/klog/v2" @@ -26,13 +25,12 @@ import ( generatorargs "k8s.io/code-generator/cmd/register-gen/args" "k8s.io/code-generator/cmd/register-gen/generators" "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" ) func main() { klog.InitFlags(nil) genericArgs := generatorargs.NewDefaults() - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.GoHeaderFilePath = util.BoilerplatePath() genericArgs.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/main.go b/vendor/k8s.io/code-generator/cmd/set-gen/main.go index 2d98a1b32..f40c5e4a4 100644 --- a/vendor/k8s.io/code-generator/cmd/set-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/set-gen/main.go @@ -26,7 
+26,6 @@ package main import ( "os" - "path/filepath" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" @@ -40,7 +39,7 @@ func main() { arguments := args.Default() // Override defaults. - arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.GoHeaderFilePath = util.BoilerplatePath() arguments.InputDirs = []string{"k8s.io/kubernetes/pkg/util/sets/types"} arguments.OutputPackagePath = "k8s.io/apimachinery/pkg/util/sets" diff --git a/vendor/k8s.io/code-generator/doc.go b/vendor/k8s.io/code-generator/doc.go new file mode 100644 index 000000000..fd8673069 --- /dev/null +++ b/vendor/k8s.io/code-generator/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codegenerator // import "k8s.io/code-generator" diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh index 1c2fd7a55..cfc2576a8 100644 --- a/vendor/k8s.io/code-generator/generate-groups.sh +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -47,10 +47,10 @@ GROUPS_WITH_VERSIONS="$4" shift 4 ( - # To support running this script from anywhere, we have to first cd into this directory - # so we can install the tools. + # To support running this script from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. cd "$(dirname "${0}")" - go install ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} + GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} ) # Go installs the above commands to get installed in $GOBIN if defined, and $GOPATH/bin otherwise: GOBIN="$(go env GOBIN)" @@ -71,7 +71,7 @@ done if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then echo "Generating deepcopy funcs" - "${gobin}/deepcopy-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@" + "${gobin}/deepcopy-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy "$@" fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh index e4430b27d..2c114a370 100644 --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -47,7 +47,12 @@ EXT_APIS_PKG="$4" GROUPS_WITH_VERSIONS="$5" shift 5 -go install ./"$(dirname "${0}")"/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} +( + # To support running this script from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. 
+ cd "$(dirname "${0}")" + GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} +) function codegen::join() { local IFS="$1"; shift; echo "$*"; } @@ -72,7 +77,7 @@ done if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then echo "Generating deepcopy funcs" - "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${INT_APIS_PKG},${EXT_APIS_PKG}" "$@" + "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy "$@" fi if [ "${GENS}" = "all" ] || grep -qw "defaulter" <<<"${GENS}"; then diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go index 6ed1fe497..72ae683d8 100644 --- a/vendor/k8s.io/code-generator/pkg/util/build.go +++ b/vendor/k8s.io/code-generator/pkg/util/build.go @@ -18,10 +18,12 @@ package util import ( gobuild "go/build" - "path" + "os" "path/filepath" "reflect" "strings" + + "golang.org/x/tools/go/packages" ) type empty struct{} @@ -55,9 +57,37 @@ func hasSubdir(root, dir string) (rel string, ok bool) { return filepath.ToSlash(dir[len(root):]), true } -// BoilerplatePath uses the boilerplate in code-generator by calculating the relative path to it. +// BoilerplatePath returns the path to the boilerplate file in code-generator, +// or "" if the default boilerplate.go.txt file cannot be located. func BoilerplatePath() string { - return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt") + // set up paths to check + paths := []string{ + // works when run from root of $GOPATH containing k8s.io/code-generator + filepath.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt"), + // works when run from root of module vendoring k8s.io/code-generator + "vendor/k8s.io/code-generator/hack/boilerplate.go.txt", + // works when run from root of $GOPATH containing k8s.io/kubernetes + "k8s.io/kubernetes/vendor/k8s.io/code-generator/hack/boilerplate.go.txt", + } + + // see if we can locate the module directory and add that to the list + config := packages.Config{Mode: packages.NeedModule} + if loadedPackages, err := packages.Load(&config, "k8s.io/code-generator/pkg/util"); err == nil { + for _, loadedPackage := range loadedPackages { + if loadedPackage.Module != nil && loadedPackage.Module.Dir != "" { + paths = append(paths, filepath.Join(loadedPackage.Module.Dir, "hack/boilerplate.go.txt")) + } + } + } + + // try all paths and return the first that exists + for _, path := range paths { + if _, err := os.Stat(path); err == nil { + return path + } + } + // cannot be located, invoker will have to explicitly specify boilerplate file + return "" } // Vendorless trims vendor prefix from a package path to make it canonical diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go index 7d13de5a1..fb797be03 100644 --- a/vendor/k8s.io/code-generator/tools.go +++ b/vendor/k8s.io/code-generator/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools /* @@ -18,7 +19,7 @@ limitations under the License. 
// This package contains code generation utilities // This package imports things required by build scripts, to force `go mod` to see them as dependencies -package tools +package codegenerator import ( _ "k8s.io/code-generator/cmd/client-gen" diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go index a52faf919..11adc2683 100644 --- a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go +++ b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go @@ -23,39 +23,24 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// ciphers maps strings into tls package cipher constants in -// https://golang.org/pkg/crypto/tls/#pkg-constants -// to be replaced by tls.CipherSuites() when the project migrates to go1.14. -var ciphers = map[string]uint16{ - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, - "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, - "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, -} +var ( + // ciphers maps strings into tls package cipher constants in + // https://golang.org/pkg/crypto/tls/#pkg-constants + ciphers = map[string]uint16{} + insecureCiphers = map[string]uint16{} +) -// to be replaced by tls.InsecureCipherSuites() when the project migrates to go1.14. 
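// Editor's sketch (not part of the vendored patch): the hand-maintained cipher maps
// removed here are rebuilt in the init() that follows from tls.CipherSuites() and
// tls.InsecureCipherSuites(). The helper below shows the same derivation in
// miniature; the function name is an assumption for illustration.
package example

import "crypto/tls"

// cipherSuitesByName maps standard-library cipher suite names to their IDs.
func cipherSuitesByName() map[string]uint16 {
	suites := tls.CipherSuites()
	m := make(map[string]uint16, len(suites))
	for _, suite := range suites {
		m[suite.Name] = suite.ID
	}
	return m
}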
-var insecureCiphers = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, +func init() { + for _, suite := range tls.CipherSuites() { + ciphers[suite.Name] = suite.ID + } + // keep legacy names for backward compatibility + ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + + for _, suite := range tls.InsecureCipherSuites() { + insecureCiphers[suite.Name] = suite.ID + } } // InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have diff --git a/vendor/k8s.io/component-base/cli/flag/flags.go b/vendor/k8s.io/component-base/cli/flag/flags.go index 70146f335..2388340d5 100644 --- a/vendor/k8s.io/component-base/cli/flag/flags.go +++ b/vendor/k8s.io/component-base/cli/flag/flags.go @@ -24,6 +24,8 @@ import ( "k8s.io/klog/v2" ) +var underscoreWarnings = make(map[string]bool) + // WordSepNormalizeFunc changes all flags that contain "_" separators func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { if strings.Contains(name, "_") { @@ -36,7 +38,10 @@ func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { if strings.Contains(name, "_") { nname := strings.Replace(name, "_", "-", -1) - klog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned { + klog.Warningf("using an underscore in a flag name is not supported. %s has been converted to %s.", name, nname) + underscoreWarnings[name] = true + } return pflag.NormalizedName(nname) } diff --git a/vendor/k8s.io/component-base/config/OWNERS b/vendor/k8s.io/component-base/config/OWNERS index 11d499d75..8d65ac8f2 100644 --- a/vendor/k8s.io/component-base/config/OWNERS +++ b/vendor/k8s.io/component-base/config/OWNERS @@ -7,8 +7,11 @@ approvers: - api-approvers reviewers: - api-reviewers +emeritus_reviewers: - luxas - mtaufen - sttts labels: - kind/api-change +- sig/api-machinery +- sig/scheduling diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go index c6cd112af..52336173c 100644 --- a/vendor/k8s.io/component-base/config/types.go +++ b/vendor/k8s.io/component-base/config/types.go @@ -17,6 +17,14 @@ limitations under the License. package config import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -85,7 +93,122 @@ type LoggingConfiguration struct { // Format Flag specifies the structure of log messages. // default value of format is `text` Format string + // Maximum number of seconds between log flushes. Ignored if the + // selected logging backend writes log messages without buffering. + FlushFrequency time.Duration + // Verbosity is the threshold that determines which log messages are + // logged. 
Default is zero which logs only the most important + // messages. Higher values enable additional messages. Error messages + // are always logged. + Verbosity VerbosityLevel + // VModule overrides the verbosity threshold for individual files. + // Only supported for "text" log format. + VModule VModuleConfiguration // [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). // Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) Sanitization bool + // [Experimental] Options holds additional parameters that are specific + // to the different logging formats. Only the options for the selected + // format get used, but all of them get validated. + Options FormatOptions +} + +// FormatOptions contains options for the different logging formats. +type FormatOptions struct { + // [Experimental] JSON contains options for logging format "json". + JSON JSONOptions +} + +// JSONOptions contains options for logging format "json". +type JSONOptions struct { + // [Experimental] SplitStream redirects error messages to stderr while + // info messages go to stdout, with buffering. The default is to write + // both to stdout, without buffering. + SplitStream bool + // [Experimental] InfoBufferSize sets the size of the info stream when + // using split streams. The default is zero, which disables buffering. + InfoBufferSize resource.QuantityValue +} + +// VModuleConfiguration is a collection of individual file names or patterns +// and the corresponding verbosity threshold. +type VModuleConfiguration []VModuleItem + +var _ pflag.Value = &VModuleConfiguration{} + +// VModuleItem defines verbosity for one or more files which match a certain +// glob pattern. +type VModuleItem struct { + // FilePattern is a base file name (i.e. minus the ".go" suffix and + // directory) or a "glob" pattern for such a name. It must not contain + // comma and equal signs because those are separators for the + // corresponding klog command line argument. + FilePattern string + // Verbosity is the threshold for log messages emitted inside files + // that match the pattern. + Verbosity VerbosityLevel +} + +// String returns the -vmodule parameter (comma-separated list of pattern=N). +func (vmodule *VModuleConfiguration) String() string { + var patterns []string + for _, item := range *vmodule { + patterns = append(patterns, fmt.Sprintf("%s=%d", item.FilePattern, item.Verbosity)) + } + return strings.Join(patterns, ",") +} + +// Set parses the -vmodule parameter (comma-separated list of pattern=N). +func (vmodule *VModuleConfiguration) Set(value string) error { + // This code mirrors https://github.com/kubernetes/klog/blob/9ad246211af1ed84621ee94a26fcce0038b69cd1/klog.go#L287-L313 + + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return fmt.Errorf("%q does not have the pattern=N format", pat) + } + pattern := patLev[0] + // 31 instead of 32 to ensure that it also fits into int32. + v, err := strconv.ParseUint(patLev[1], 10, 31) + if err != nil { + return fmt.Errorf("parsing verbosity in %q: %v", pat, err) + } + *vmodule = append(*vmodule, VModuleItem{FilePattern: pattern, Verbosity: VerbosityLevel(v)}) + } + return nil +} + +func (vmodule *VModuleConfiguration) Type() string { + return "pattern=N,..." 
+} + +// VerbosityLevel represents a klog or logr verbosity threshold. +type VerbosityLevel uint32 + +var _ pflag.Value = new(VerbosityLevel) + +func (l *VerbosityLevel) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +func (l *VerbosityLevel) Get() interface{} { + return *l +} + +func (l *VerbosityLevel) Set(value string) error { + // Limited to int32 for compatibility with klog. + v, err := strconv.ParseUint(value, 10, 31) + if err != nil { + return err + } + *l = VerbosityLevel(v) + return nil +} + +func (l *VerbosityLevel) Type() string { + return "Level" } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/defaults.go b/vendor/k8s.io/component-base/config/v1alpha1/defaults.go index 098c5739d..57d066eda 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/defaults.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/defaults.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "time" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilpointer "k8s.io/utils/pointer" ) @@ -110,4 +111,18 @@ func RecommendedLoggingConfiguration(obj *LoggingConfiguration) { if obj.Format == "" { obj.Format = "text" } + var empty resource.QuantityValue + if obj.Options.JSON.InfoBufferSize == empty { + obj.Options.JSON.InfoBufferSize = resource.QuantityValue{ + // This is similar, but not quite the same as a default + // constructed instance. + Quantity: *resource.NewQuantity(0, resource.DecimalSI), + } + // This sets the unexported Quantity.s which will be compared + // by reflect.DeepEqual in some tests. + _ = obj.Options.JSON.InfoBufferSize.String() + } + if obj.FlushFrequency == 0 { + obj.FlushFrequency = 5 * time.Second + } } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/types.go b/vendor/k8s.io/component-base/config/v1alpha1/types.go index cd56c1fcf..734e0d970 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/types.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -17,6 +17,9 @@ limitations under the License. package v1alpha1 import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -87,7 +90,56 @@ type LoggingConfiguration struct { // Format Flag specifies the structure of log messages. // default value of format is `text` Format string `json:"format,omitempty"` + // Maximum number of seconds between log flushes. Ignored if the + // selected logging backend writes log messages without buffering. + FlushFrequency time.Duration `json:"flushFrequency"` + // Verbosity is the threshold that determines which log messages are + // logged. Default is zero which logs only the most important + // messages. Higher values enable additional messages. Error messages + // are always logged. + Verbosity uint32 `json:"verbosity"` + // VModule overrides the verbosity threshold for individual files. + // Only supported for "text" log format. + VModule VModuleConfiguration `json:"vmodule,omitempty"` // [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). // Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) Sanitization bool `json:"sanitization,omitempty"` + // [Experimental] Options holds additional parameters that are specific + // to the different logging formats. Only the options for the selected + // format get used, but all of them get validated. 
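// Editor's sketch (not part of the vendored patch): VModuleConfiguration and
// VerbosityLevel implement pflag.Value, so the new fields can be bound directly
// to -v and --vmodule, as BindLoggingFlags does later in this diff. The flag set
// wiring below is an assumed, minimal example.
package example

import (
	"github.com/spf13/pflag"

	"k8s.io/component-base/config"
)

// bindVerbosityFlags registers the verbosity flags against a LoggingConfiguration.
func bindVerbosityFlags(c *config.LoggingConfiguration, fs *pflag.FlagSet) {
	fs.VarP(&c.Verbosity, "v", "v", "number for the log level verbosity")
	fs.Var(&c.VModule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
}

// For example, --vmodule=scheduler*=5,kubelet=2 parses into two VModuleItem entries.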
+ Options FormatOptions `json:"options,omitempty"` +} + +// FormatOptions contains options for the different logging formats. +type FormatOptions struct { + // [Experimental] JSON contains options for logging format "json". + JSON JSONOptions `json:"json,omitempty"` +} + +// JSONOptions contains options for logging format "json". +type JSONOptions struct { + // [Experimental] SplitStream redirects error messages to stderr while + // info messages go to stdout, with buffering. The default is to write + // both to stdout, without buffering. + SplitStream bool `json:"splitStream,omitempty"` + // [Experimental] InfoBufferSize sets the size of the info stream when + // using split streams. The default is zero, which disables buffering. + InfoBufferSize resource.QuantityValue `json:"infoBufferSize,omitempty"` +} + +// VModuleConfiguration is a collection of individual file names or patterns +// and the corresponding verbosity threshold. +type VModuleConfiguration []VModuleItem + +// VModuleItem defines verbosity for one or more files which match a certain +// glob pattern. +type VModuleItem struct { + // FilePattern is a base file name (i.e. minus the ".go" suffix and + // directory) or a "glob" pattern for such a name. It must not contain + // comma and equal signs because those are separators for the + // corresponding klog command line argument. + FilePattern string `json:"filePattern"` + // Verbosity is the threshold for log messages emitted inside files + // that match the pattern. + Verbosity uint32 `json:"verbosity"` } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go index 5490ba3ba..93cf4cc64 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -21,6 +22,9 @@ limitations under the License. package v1alpha1 import ( + time "time" + unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" @@ -34,6 +38,36 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*FormatOptions)(nil), (*config.FormatOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FormatOptions_To_config_FormatOptions(a.(*FormatOptions), b.(*config.FormatOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.FormatOptions)(nil), (*FormatOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_FormatOptions_To_v1alpha1_FormatOptions(a.(*config.FormatOptions), b.(*FormatOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONOptions)(nil), (*config.JSONOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_JSONOptions_To_config_JSONOptions(a.(*JSONOptions), b.(*config.JSONOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.JSONOptions)(nil), (*JSONOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_JSONOptions_To_v1alpha1_JSONOptions(a.(*config.JSONOptions), b.(*JSONOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*VModuleItem)(nil), (*config.VModuleItem)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_VModuleItem_To_config_VModuleItem(a.(*VModuleItem), b.(*config.VModuleItem), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.VModuleItem)(nil), (*VModuleItem)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_VModuleItem_To_v1alpha1_VModuleItem(a.(*config.VModuleItem), b.(*VModuleItem), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*config.ClientConnectionConfiguration)(nil), (*ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(a.(*config.ClientConnectionConfiguration), b.(*ClientConnectionConfiguration), scope) }); err != nil { @@ -115,6 +149,52 @@ func autoConvert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguratio return nil } +func autoConvert_v1alpha1_FormatOptions_To_config_FormatOptions(in *FormatOptions, out *config.FormatOptions, s conversion.Scope) error { + if err := Convert_v1alpha1_JSONOptions_To_config_JSONOptions(&in.JSON, &out.JSON, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_FormatOptions_To_config_FormatOptions is an autogenerated conversion function. +func Convert_v1alpha1_FormatOptions_To_config_FormatOptions(in *FormatOptions, out *config.FormatOptions, s conversion.Scope) error { + return autoConvert_v1alpha1_FormatOptions_To_config_FormatOptions(in, out, s) +} + +func autoConvert_config_FormatOptions_To_v1alpha1_FormatOptions(in *config.FormatOptions, out *FormatOptions, s conversion.Scope) error { + if err := Convert_config_JSONOptions_To_v1alpha1_JSONOptions(&in.JSON, &out.JSON, s); err != nil { + return err + } + return nil +} + +// Convert_config_FormatOptions_To_v1alpha1_FormatOptions is an autogenerated conversion function. 
+func Convert_config_FormatOptions_To_v1alpha1_FormatOptions(in *config.FormatOptions, out *FormatOptions, s conversion.Scope) error { + return autoConvert_config_FormatOptions_To_v1alpha1_FormatOptions(in, out, s) +} + +func autoConvert_v1alpha1_JSONOptions_To_config_JSONOptions(in *JSONOptions, out *config.JSONOptions, s conversion.Scope) error { + out.SplitStream = in.SplitStream + out.InfoBufferSize = in.InfoBufferSize + return nil +} + +// Convert_v1alpha1_JSONOptions_To_config_JSONOptions is an autogenerated conversion function. +func Convert_v1alpha1_JSONOptions_To_config_JSONOptions(in *JSONOptions, out *config.JSONOptions, s conversion.Scope) error { + return autoConvert_v1alpha1_JSONOptions_To_config_JSONOptions(in, out, s) +} + +func autoConvert_config_JSONOptions_To_v1alpha1_JSONOptions(in *config.JSONOptions, out *JSONOptions, s conversion.Scope) error { + out.SplitStream = in.SplitStream + out.InfoBufferSize = in.InfoBufferSize + return nil +} + +// Convert_config_JSONOptions_To_v1alpha1_JSONOptions is an autogenerated conversion function. +func Convert_config_JSONOptions_To_v1alpha1_JSONOptions(in *config.JSONOptions, out *JSONOptions, s conversion.Scope) error { + return autoConvert_config_JSONOptions_To_v1alpha1_JSONOptions(in, out, s) +} + func autoConvert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *config.LeaderElectionConfiguration, s conversion.Scope) error { if err := v1.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { return err @@ -143,12 +223,46 @@ func autoConvert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionCo func autoConvert_v1alpha1_LoggingConfiguration_To_config_LoggingConfiguration(in *LoggingConfiguration, out *config.LoggingConfiguration, s conversion.Scope) error { out.Format = in.Format + out.FlushFrequency = time.Duration(in.FlushFrequency) + out.Verbosity = config.VerbosityLevel(in.Verbosity) + out.VModule = *(*config.VModuleConfiguration)(unsafe.Pointer(&in.VModule)) out.Sanitization = in.Sanitization + if err := Convert_v1alpha1_FormatOptions_To_config_FormatOptions(&in.Options, &out.Options, s); err != nil { + return err + } return nil } func autoConvert_config_LoggingConfiguration_To_v1alpha1_LoggingConfiguration(in *config.LoggingConfiguration, out *LoggingConfiguration, s conversion.Scope) error { out.Format = in.Format + out.FlushFrequency = time.Duration(in.FlushFrequency) + out.Verbosity = uint32(in.Verbosity) + out.VModule = *(*VModuleConfiguration)(unsafe.Pointer(&in.VModule)) out.Sanitization = in.Sanitization + if err := Convert_config_FormatOptions_To_v1alpha1_FormatOptions(&in.Options, &out.Options, s); err != nil { + return err + } return nil } + +func autoConvert_v1alpha1_VModuleItem_To_config_VModuleItem(in *VModuleItem, out *config.VModuleItem, s conversion.Scope) error { + out.FilePattern = in.FilePattern + out.Verbosity = config.VerbosityLevel(in.Verbosity) + return nil +} + +// Convert_v1alpha1_VModuleItem_To_config_VModuleItem is an autogenerated conversion function. 
+func Convert_v1alpha1_VModuleItem_To_config_VModuleItem(in *VModuleItem, out *config.VModuleItem, s conversion.Scope) error { + return autoConvert_v1alpha1_VModuleItem_To_config_VModuleItem(in, out, s) +} + +func autoConvert_config_VModuleItem_To_v1alpha1_VModuleItem(in *config.VModuleItem, out *VModuleItem, s conversion.Scope) error { + out.FilePattern = in.FilePattern + out.Verbosity = uint32(in.Verbosity) + return nil +} + +// Convert_config_VModuleItem_To_v1alpha1_VModuleItem is an autogenerated conversion function. +func Convert_config_VModuleItem_To_v1alpha1_VModuleItem(in *config.VModuleItem, out *VModuleItem, s conversion.Scope) error { + return autoConvert_config_VModuleItem_To_v1alpha1_VModuleItem(in, out, s) +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go index f5f2a0e91..5503e7a86 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -62,6 +63,40 @@ func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatOptions) DeepCopyInto(out *FormatOptions) { + *out = *in + in.JSON.DeepCopyInto(&out.JSON) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatOptions. +func (in *FormatOptions) DeepCopy() *FormatOptions { + if in == nil { + return nil + } + out := new(FormatOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONOptions) DeepCopyInto(out *JSONOptions) { + *out = *in + in.InfoBufferSize.DeepCopyInto(&out.InfoBufferSize) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONOptions. +func (in *JSONOptions) DeepCopy() *JSONOptions { + if in == nil { + return nil + } + out := new(JSONOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { *out = *in @@ -89,6 +124,12 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { *out = *in + if in.VModule != nil { + in, out := &in.VModule, &out.VModule + *out = make(VModuleConfiguration, len(*in)) + copy(*out, *in) + } + in.Options.DeepCopyInto(&out.Options) return } @@ -101,3 +142,39 @@ func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in VModuleConfiguration) DeepCopyInto(out *VModuleConfiguration) { + { + in := &in + *out = make(VModuleConfiguration, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleConfiguration. 
+func (in VModuleConfiguration) DeepCopy() VModuleConfiguration { + if in == nil { + return nil + } + out := new(VModuleConfiguration) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VModuleItem) DeepCopyInto(out *VModuleItem) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleItem. +func (in *VModuleItem) DeepCopy() *VModuleItem { + if in == nil { + return nil + } + out := new(VModuleItem) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go index 77260a06f..ede25cd32 100644 --- a/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go +++ b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -52,6 +53,40 @@ func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatOptions) DeepCopyInto(out *FormatOptions) { + *out = *in + in.JSON.DeepCopyInto(&out.JSON) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatOptions. +func (in *FormatOptions) DeepCopy() *FormatOptions { + if in == nil { + return nil + } + out := new(FormatOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONOptions) DeepCopyInto(out *JSONOptions) { + *out = *in + in.InfoBufferSize.DeepCopyInto(&out.InfoBufferSize) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONOptions. +func (in *JSONOptions) DeepCopy() *JSONOptions { + if in == nil { + return nil + } + out := new(JSONOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { *out = *in @@ -74,6 +109,12 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { *out = *in + if in.VModule != nil { + in, out := &in.VModule, &out.VModule + *out = make(VModuleConfiguration, len(*in)) + copy(*out, *in) + } + in.Options.DeepCopyInto(&out.Options) return } @@ -86,3 +127,39 @@ func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in VModuleConfiguration) DeepCopyInto(out *VModuleConfiguration) { + { + in := &in + *out = make(VModuleConfiguration, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleConfiguration. 
+func (in VModuleConfiguration) DeepCopy() VModuleConfiguration { + if in == nil { + return nil + } + out := new(VModuleConfiguration) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VModuleItem) DeepCopyInto(out *VModuleItem) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleItem. +func (in *VModuleItem) DeepCopy() *VModuleItem { + if in == nil { + return nil + } + out := new(VModuleItem) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/component-base/featuregate/OWNERS b/vendor/k8s.io/component-base/featuregate/OWNERS new file mode 100644 index 000000000..48e811bf4 --- /dev/null +++ b/vendor/k8s.io/component-base/featuregate/OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Currently assigned to api-approvers since feature gates are the API +# for enabling/disabling other APIs. + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: +- api-approvers +reviewers: +- api-reviewers + +labels: +- kind/api-change +- sig/api-machinery +- sig/cluster-lifecycle diff --git a/vendor/k8s.io/component-base/logs/config.go b/vendor/k8s.io/component-base/logs/config.go index 8a029fa7c..a3fe6fb5f 100644 --- a/vendor/k8s.io/component-base/logs/config.go +++ b/vendor/k8s.io/component-base/logs/config.go @@ -19,11 +19,14 @@ package logs import ( "flag" "fmt" + "sort" "strings" "github.com/spf13/pflag" + cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/config" + "k8s.io/component-base/logs/registry" "k8s.io/klog/v2" ) @@ -33,59 +36,83 @@ const ( JSONLogFormat = "json" ) -// LogRegistry is new init LogFormatRegistry struct -var LogRegistry = NewLogFormatRegistry() +// loggingFlags captures the state of the logging flags, in particular their default value +// before flag parsing. It is used by UnsupportedLoggingFlags. +var loggingFlags pflag.FlagSet func init() { // Text format is default klog format - LogRegistry.Register(DefaultLogFormat, nil) + registry.LogRegistry.Register(DefaultLogFormat, nil) + + var fs flag.FlagSet + klog.InitFlags(&fs) + loggingFlags.AddGoFlagSet(&fs) } // List of logs (k8s.io/klog + k8s.io/component-base/logs) flags supported by all logging formats var supportedLogsFlags = map[string]struct{}{ "v": {}, - // TODO: support vmodule after 1.19 Alpha } -// BindLoggingFlags binds the Options struct fields to a flagset +// BindLoggingFlags binds the Options struct fields to a flagset. +// +// Programs using LoggingConfiguration must use SkipLoggingConfigurationFlags +// when calling AddFlags to avoid the duplicate registration of flags. func BindLoggingFlags(c *config.LoggingConfiguration, fs *pflag.FlagSet) { - normalizeFunc := func(name string) string { - f := fs.GetNormalizeFunc() - return string(f(fs, name)) - } - unsupportedFlags := fmt.Sprintf("--%s", strings.Join(UnsupportedLoggingFlags(normalizeFunc), ", --")) - formats := fmt.Sprintf(`"%s"`, strings.Join(LogRegistry.List(), `", "`)) + // The help text is generated assuming that flags will eventually use + // hyphens, even if currently no normalization function is set for the + // flag set yet. 
+ unsupportedFlags := strings.Join(unsupportedLoggingFlagNames(cliflag.WordSepNormalizeFunc), ", ") + formats := fmt.Sprintf(`"%s"`, strings.Join(registry.LogRegistry.List(), `", "`)) fs.StringVar(&c.Format, "logging-format", c.Format, fmt.Sprintf("Sets the log format. Permitted formats: %s.\nNon-default formats don't honor these flags: %s.\nNon-default choices are currently alpha and subject to change without warning.", formats, unsupportedFlags)) // No new log formats should be added after generation is of flag options - LogRegistry.Freeze() + registry.LogRegistry.Freeze() + + fs.DurationVar(&c.FlushFrequency, logFlushFreqFlagName, logFlushFreq, "Maximum number of seconds between log flushes") + fs.VarP(&c.Verbosity, "v", "v", "number for the log level verbosity") + fs.Var(&c.VModule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)") fs.BoolVar(&c.Sanitization, "experimental-logging-sanitization", c.Sanitization, `[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) + + // JSON options. We only register them if "json" is a valid format. The + // config file API however always has them. + if _, err := registry.LogRegistry.Get("json"); err == nil { + fs.BoolVar(&c.Options.JSON.SplitStream, "log-json-split-stream", false, "[Experimental] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout.") + fs.Var(&c.Options.JSON.InfoBufferSize, "log-json-info-buffer-size", "[Experimental] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi).") + } } -// UnsupportedLoggingFlags lists unsupported logging flags -func UnsupportedLoggingFlags(normalizeFunc func(name string) string) []string { - allFlags := []string{} - - // k8s.io/klog flags - fs := &flag.FlagSet{} - klog.InitFlags(fs) - fs.VisitAll(func(flag *flag.Flag) { +// UnsupportedLoggingFlags lists unsupported logging flags. The normalize +// function is optional. +func UnsupportedLoggingFlags(normalizeFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName) []*pflag.Flag { + // k8s.io/component-base/logs and klog flags + pfs := &pflag.FlagSet{} + loggingFlags.VisitAll(func(flag *pflag.Flag) { if _, found := supportedLogsFlags[flag.Name]; !found { - name := flag.Name - if normalizeFunc != nil { - name = normalizeFunc(name) - } - allFlags = append(allFlags, name) + // Normalization changes flag.Name, so make a copy. + clone := *flag + pfs.AddFlag(&clone) } }) - // k8s.io/component-base/logs flags - pfs := &pflag.FlagSet{} - AddFlags(pfs) + // Apply normalization. + pfs.SetNormalizeFunc(normalizeFunc) + + var allFlags []*pflag.Flag pfs.VisitAll(func(flag *pflag.Flag) { - if _, found := supportedLogsFlags[flag.Name]; !found { - allFlags = append(allFlags, flag.Name) - } + allFlags = append(allFlags, flag) }) return allFlags } + +// unsupportedLoggingFlagNames lists unsupported logging flags by name, with +// optional normalization and sorted. 
+func unsupportedLoggingFlagNames(normalizeFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName) []string { + unsupportedFlags := UnsupportedLoggingFlags(normalizeFunc) + names := make([]string, 0, len(unsupportedFlags)) + for _, f := range unsupportedFlags { + names = append(names, "--"+f.Name) + } + sort.Strings(names) + return names +} diff --git a/vendor/k8s.io/component-base/logs/logs.go b/vendor/k8s.io/component-base/logs/logs.go index 073e0312f..0f494deed 100644 --- a/vendor/k8s.io/component-base/logs/logs.go +++ b/vendor/k8s.io/component-base/logs/logs.go @@ -14,6 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package logs contains support for logging options, flags and setup. +// Commands must explicitly enable command line flags. They no longer +// get added automatically when importing this package. package logs import ( @@ -28,17 +31,131 @@ import ( ) const logFlushFreqFlagName = "log-flush-frequency" +const deprecated = "will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components" -var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum number of seconds between log flushes") +// TODO (https://github.com/kubernetes/kubernetes/issues/105310): once klog +// flags are removed, stop warning about "Non-default formats don't honor these +// flags" in config.go and instead add this remark here. +// +// const vmoduleUsage = " (only works for the default text log format)" + +var ( + packageFlags = flag.NewFlagSet("logging", flag.ContinueOnError) + logrFlush func() + + // Periodic flushing gets configured either via the global flag + // in this file or via LoggingConfiguration. + logFlushFreq time.Duration + logFlushFreqAdded bool +) func init() { - klog.InitFlags(flag.CommandLine) + klog.InitFlags(packageFlags) + packageFlags.DurationVar(&logFlushFreq, logFlushFreqFlagName, 5*time.Second, "Maximum number of seconds between log flushes") } -// AddFlags registers this package's flags on arbitrary FlagSets, such that they point to the -// same value as the global flags. -func AddFlags(fs *pflag.FlagSet) { - fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) +type addFlagsOptions struct { + skipLoggingConfigurationFlags bool +} + +type Option func(*addFlagsOptions) + +// SkipLoggingConfigurationFlags must be used as option for AddFlags when +// the program also uses a LoggingConfiguration struct for configuring +// logging. Then only flags not covered by that get added. +func SkipLoggingConfigurationFlags() Option { + return func(o *addFlagsOptions) { + o.skipLoggingConfigurationFlags = true + } +} + +// AddFlags registers this package's flags on arbitrary FlagSets. This includes +// the klog flags, with the original underscore as separator between. If +// commands want hyphens as separators, they can set +// k8s.io/component-base/cli/flag/WordSepNormalizeFunc as normalization +// function on the flag set before calling AddFlags. +// +// May be called more than once. +func AddFlags(fs *pflag.FlagSet, opts ...Option) { + // Determine whether the flags are already present by looking up one + // which always should exist. + if fs.Lookup("logtostderr") != nil { + return + } + + o := addFlagsOptions{} + for _, opt := range opts { + opt(&o) + } + + // Add flags with pflag deprecation remark for some klog flags. 
+ packageFlags.VisitAll(func(f *flag.Flag) { + pf := pflag.PFlagFromGoFlag(f) + switch f.Name { + case "v": + // unchanged, potentially skip it + if o.skipLoggingConfigurationFlags { + return + } + case logFlushFreqFlagName: + // unchanged, potentially skip it + if o.skipLoggingConfigurationFlags { + return + } + logFlushFreqAdded = true + case "vmodule": + // TODO: see above + // pf.Usage += vmoduleUsage + if o.skipLoggingConfigurationFlags { + return + } + default: + // deprecated, but not hidden + pf.Deprecated = deprecated + } + fs.AddFlag(pf) + }) +} + +// AddGoFlags is a variant of AddFlags for traditional Go flag.FlagSet. +// Commands should use pflag whenever possible for the sake of consistency. +// Cases where this function is needed include tests (they have to set up flags +// in flag.CommandLine) and commands that for historic reasons use Go +// flag.Parse and cannot change to pflag because it would break their command +// line interface. +func AddGoFlags(fs *flag.FlagSet, opts ...Option) { + o := addFlagsOptions{} + for _, opt := range opts { + opt(&o) + } + + // Add flags with deprecation remark added to the usage text of + // some klog flags. + packageFlags.VisitAll(func(f *flag.Flag) { + usage := f.Usage + switch f.Name { + case "v": + // unchanged + if o.skipLoggingConfigurationFlags { + return + } + case logFlushFreqFlagName: + // unchanged + if o.skipLoggingConfigurationFlags { + return + } + logFlushFreqAdded = true + case "vmodule": + // TODO: see above + // usage += vmoduleUsage + if o.skipLoggingConfigurationFlags { + return + } + default: + usage += " (DEPRECATED: " + deprecated + ")" + } + fs.Var(f.Value, f.Name, usage) + }) } // KlogWriter serves as a bridge between the standard log package and the glog package. @@ -50,17 +167,27 @@ func (writer KlogWriter) Write(data []byte) (n int, err error) { return len(data), nil } -// InitLogs initializes logs the way we want for kubernetes. +// InitLogs initializes logs the way we want for Kubernetes. +// It should be called after parsing flags. If called before that, +// it will use the default log settings. func InitLogs() { log.SetOutput(KlogWriter{}) log.SetFlags(0) - // The default glog flush interval is 5 seconds. - go wait.Forever(klog.Flush, *logFlushFreq) + if logFlushFreqAdded { + // The flag from this file was activated, so use it now. + // Otherwise LoggingConfiguration.Apply will do this. + go wait.Forever(FlushLogs, logFlushFreq) + } } -// FlushLogs flushes logs immediately. +// FlushLogs flushes logs immediately. This should be called at the end of +// the main function via defer to ensure that all pending log messages +// are printed before exiting the program. func FlushLogs() { klog.Flush() + if logrFlush != nil { + logrFlush() + } } // NewLogger creates a new log.Logger which sends logs to klog.Info. diff --git a/vendor/k8s.io/component-base/logs/options.go b/vendor/k8s.io/component-base/logs/options.go index 46e52a0a1..8e3e9fffe 100644 --- a/vendor/k8s.io/component-base/logs/options.go +++ b/vendor/k8s.io/component-base/logs/options.go @@ -17,13 +17,17 @@ limitations under the License. 
package logs import ( + "fmt" + "github.com/spf13/pflag" - "k8s.io/klog/v2" - + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/config" "k8s.io/component-base/config/v1alpha1" + "k8s.io/component-base/logs/registry" "k8s.io/component-base/logs/sanitization" + "k8s.io/klog/v2" ) // Options has klog format parameters @@ -40,9 +44,22 @@ func NewOptions() *Options { return o } -// Validate verifies if any unsupported flag is set +// ValidateAndApply combines validation and application of the logging configuration. +// This should be invoked as early as possible because then the rest of the program +// startup (including validation of other options) will already run with the final +// logging configuration. +func (o *Options) ValidateAndApply() error { + errs := o.validate() + if len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + o.apply() + return nil +} + +// validate verifies if any unsupported flag is set // for non-default logging format -func (o *Options) Validate() []error { +func (o *Options) validate() []error { errs := ValidateLoggingConfiguration(&o.Config, nil) if len(errs) != 0 { return errs.ToAggregate().Errors() @@ -50,17 +67,33 @@ func (o *Options) Validate() []error { return nil } -// AddFlags add logging-format flag +// AddFlags add logging-format flag. +// +// Programs using LoggingConfiguration must use SkipLoggingConfigurationFlags +// when calling AddFlags to avoid the duplicate registration of flags. func (o *Options) AddFlags(fs *pflag.FlagSet) { BindLoggingFlags(&o.Config, fs) } -// Apply set klog logger from LogFormat type -func (o *Options) Apply() { +// apply set klog logger from LogFormat type +func (o *Options) apply() { // if log format not exists, use nil loggr - loggr, _ := LogRegistry.Get(o.Config.Format) - klog.SetLogger(loggr) + factory, _ := registry.LogRegistry.Get(o.Config.Format) + if factory == nil { + klog.ClearLogger() + } else { + log, flush := factory.Create(o.Config.Options) + klog.SetLogger(log) + logrFlush = flush + } if o.Config.Sanitization { klog.SetLogFilter(&sanitization.SanitizingFilter{}) } + if err := loggingFlags.Lookup("v").Value.Set(o.Config.Verbosity.String()); err != nil { + panic(fmt.Errorf("internal error while setting klog verbosity: %v", err)) + } + if err := loggingFlags.Lookup("vmodule").Value.Set(o.Config.VModule.String()); err != nil { + panic(fmt.Errorf("internal error while setting klog vmodule: %v", err)) + } + go wait.Forever(FlushLogs, o.Config.FlushFrequency) } diff --git a/vendor/k8s.io/component-base/logs/registry.go b/vendor/k8s.io/component-base/logs/registry/registry.go similarity index 65% rename from vendor/k8s.io/component-base/logs/registry.go rename to vendor/k8s.io/component-base/logs/registry/registry.go index 150af394c..145c0b8fd 100644 --- a/vendor/k8s.io/component-base/logs/registry.go +++ b/vendor/k8s.io/component-base/logs/registry/registry.go @@ -14,43 +14,59 @@ See the License for the specific language governing permissions and limitations under the License. */ -package logs +package registry import ( "fmt" "sort" "github.com/go-logr/logr" + + "k8s.io/component-base/config" ) +// LogRegistry is new init LogFormatRegistry struct +var LogRegistry = NewLogFormatRegistry() + // LogFormatRegistry store klog format registry type LogFormatRegistry struct { - registry map[string]logr.Logger + registry map[string]LogFormatFactory frozen bool } +// LogFormatFactory provides support for a certain additional, +// non-default log format. 
+type LogFormatFactory interface { + // Create returns a logger with the requested configuration. + // Returning a flush function for the logger is optional. + // If provided, the caller must ensure that it is called + // periodically (if desired) and at program exit. + Create(options config.FormatOptions) (log logr.Logger, flush func()) +} + // NewLogFormatRegistry return new init LogFormatRegistry struct func NewLogFormatRegistry() *LogFormatRegistry { return &LogFormatRegistry{ - registry: make(map[string]logr.Logger), + registry: make(map[string]LogFormatFactory), frozen: false, } } -// Register new log format registry to global logRegistry -func (lfr *LogFormatRegistry) Register(name string, logger logr.Logger) error { +// Register new log format registry to global logRegistry. +// nil is valid and selects the default klog output. +func (lfr *LogFormatRegistry) Register(name string, factory LogFormatFactory) error { if lfr.frozen { return fmt.Errorf("log format is frozen, unable to register log format") } if _, ok := lfr.registry[name]; ok { return fmt.Errorf("log format: %s already exists", name) } - lfr.registry[name] = logger + lfr.registry[name] = factory return nil } // Get specified log format logger -func (lfr *LogFormatRegistry) Get(name string) (logr.Logger, error) { +func (lfr *LogFormatRegistry) Get(name string) (LogFormatFactory, error) { re, ok := lfr.registry[name] if !ok { return nil, fmt.Errorf("log format: %s does not exists", name) @@ -59,12 +75,12 @@ func (lfr *LogFormatRegistry) Get(name string) (logr.Logger, error) { } // Set specified log format logger -func (lfr *LogFormatRegistry) Set(name string, logger logr.Logger) error { +func (lfr *LogFormatRegistry) Set(name string, factory LogFormatFactory) error { if lfr.frozen { return fmt.Errorf("log format is frozen, unable to set log format") } - lfr.registry[name] = logger + lfr.registry[name] = factory return nil } diff --git a/vendor/k8s.io/component-base/logs/validate.go b/vendor/k8s.io/component-base/logs/validate.go index 86f1cecf3..694e95d46 100644 --- a/vendor/k8s.io/component-base/logs/validate.go +++ b/vendor/k8s.io/component-base/logs/validate.go @@ -17,52 +17,54 @@ limitations under the License. package logs import ( - "flag" "fmt" + "math" "strings" - "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/util/validation/field" + cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/config" + "k8s.io/component-base/logs/registry" ) func ValidateLoggingConfiguration(c *config.LoggingConfiguration, fldPath *field.Path) field.ErrorList { errs := field.ErrorList{} if c.Format != DefaultLogFormat { - allFlags := UnsupportedLoggingFlags(hyphensToUnderscores) - for _, fname := range allFlags { - if flagIsSet(fname, hyphensToUnderscores) { - errs = append(errs, field.Invalid(fldPath.Child("format"), c.Format, fmt.Sprintf("Non-default format doesn't honor flag: %s", fname))) + // WordSepNormalizeFunc is just a guess. Commands should use it, + // but we cannot know for sure. 
+ allFlags := UnsupportedLoggingFlags(cliflag.WordSepNormalizeFunc) + for _, f := range allFlags { + if f.DefValue != f.Value.String() { + errs = append(errs, field.Invalid(fldPath.Child("format"), c.Format, fmt.Sprintf("Non-default format doesn't honor flag: %s", f.Name))) } } } - if _, err := LogRegistry.Get(c.Format); err != nil { + _, err := registry.LogRegistry.Get(c.Format) + if err != nil { errs = append(errs, field.Invalid(fldPath.Child("format"), c.Format, "Unsupported log format")) } - return errs -} -// hyphensToUnderscores replaces hyphens with underscores -// we should always use underscores instead of hyphens when validate flags -func hyphensToUnderscores(s string) string { - return strings.Replace(s, "-", "_", -1) -} - -func flagIsSet(name string, normalizeFunc func(name string) string) bool { - f := flag.Lookup(name) - if f != nil { - return f.DefValue != f.Value.String() + // The type in our struct is uint32, but klog only accepts positive int32. + if c.Verbosity > math.MaxInt32 { + errs = append(errs, field.Invalid(fldPath.Child("verbosity"), c.Verbosity, fmt.Sprintf("Must be <= %d", math.MaxInt32))) } - if normalizeFunc != nil { - f = flag.Lookup(normalizeFunc(name)) - if f != nil { - return f.DefValue != f.Value.String() + vmoduleFldPath := fldPath.Child("vmodule") + if len(c.VModule) > 0 && c.Format != "" && c.Format != "text" { + errs = append(errs, field.Forbidden(vmoduleFldPath, "Only supported for text log format")) + } + for i, item := range c.VModule { + if item.FilePattern == "" { + errs = append(errs, field.Required(vmoduleFldPath.Index(i), "File pattern must not be empty")) + } + if strings.ContainsAny(item.FilePattern, "=,") { + errs = append(errs, field.Invalid(vmoduleFldPath.Index(i), item.FilePattern, "File pattern must not contain equal sign or comma")) + } + if item.Verbosity > math.MaxInt32 { + errs = append(errs, field.Invalid(vmoduleFldPath.Index(i), item.Verbosity, fmt.Sprintf("Must be <= %d", math.MaxInt32))) } } - pf := pflag.Lookup(name) - if pf != nil { - return pf.DefValue != pf.Value.String() - } - panic("failed to lookup unsupported log flag") + + // Currently nothing to validate for c.Options. + + return errs } diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go index ff38953ba..56a9dcae5 100644 --- a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -44,9 +44,9 @@ var ( ) func init() { - //lint:ignore SA1019 - replacement function still calls prometheus.NewProcessCollector(). + //nolint:staticcheck // SA1019 - replacement function still calls prometheus.NewProcessCollector(). RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) - //lint:ignore SA1019 - replacement function still calls prometheus.NewGoCollector(). + //nolint:staticcheck // SA1019 - replacement function still calls prometheus.NewGoCollector(). RawMustRegister(prometheus.NewGoCollector()) } diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go index 91a76ba7e..456fe0b0a 100644 --- a/vendor/k8s.io/component-base/metrics/options.go +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -58,8 +58,8 @@ func (o *Options) Validate() []error { // AddFlags adds flags for exposing component metrics. 
func (o *Options) AddFlags(fs *pflag.FlagSet) { - if o != nil { - o = NewOptions() + if o == nil { + return } fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion, "The previous version for which you want to show hidden metrics. "+ diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go index 89ea8a68e..a14cd8833 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime_others.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go index 8fcdf273a..7813115e7 100644 --- a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go +++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go index b608fb568..b01bc011e 100644 --- a/vendor/k8s.io/component-base/metrics/registry.go +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -274,7 +274,7 @@ func (kr *kubeRegistry) enableHiddenCollectors() { cs = append(cs, c) } - kr.hiddenCollectors = nil + kr.hiddenCollectors = make(map[string]Registerable) kr.hiddenCollectorsLock.Unlock() kr.MustRegister(cs...) } diff --git a/vendor/k8s.io/component-base/metrics/testutil/metrics.go b/vendor/k8s.io/component-base/metrics/testutil/metrics.go index 60a186483..df3f8ee0c 100644 --- a/vendor/k8s.io/component-base/metrics/testutil/metrics.go +++ b/vendor/k8s.io/component-base/metrics/testutil/metrics.go @@ -176,13 +176,87 @@ type Histogram struct { *dto.Histogram } -// GetHistogramFromGatherer collects a metric from a gatherer implementing k8s.io/component-base/metrics.Gatherer interface. +// HistogramVec wraps a slice of Histogram. +// Note that each Histogram must have the same number of buckets. +type HistogramVec []*Histogram + +// GetAggregatedSampleCount aggregates the sample count of each inner Histogram. +func (vec HistogramVec) GetAggregatedSampleCount() uint64 { + var count uint64 + for _, hist := range vec { + count += hist.GetSampleCount() + } + return count +} + +// GetAggregatedSampleSum aggregates the sample sum of each inner Histogram. +func (vec HistogramVec) GetAggregatedSampleSum() float64 { + var sum float64 + for _, hist := range vec { + sum += hist.GetSampleSum() + } + return sum +} + +// Quantile first aggregates inner buckets of each Histogram, and then +// computes q-th quantile of a cumulative histogram. +func (vec HistogramVec) Quantile(q float64) float64 { + var buckets []bucket + + for i, hist := range vec { + for j, bckt := range hist.Bucket { + if i == 0 { + buckets = append(buckets, bucket{ + count: float64(bckt.GetCumulativeCount()), + upperBound: bckt.GetUpperBound(), + }) + } else { + buckets[j].count += float64(bckt.GetCumulativeCount()) + } + } + } + + if len(buckets) == 0 || buckets[len(buckets)-1].upperBound != math.Inf(+1) { + // The list of buckets in dto.Histogram doesn't include the final +Inf bucket, so we + // add it here for the rest of the samples. 
+ buckets = append(buckets, bucket{ + count: float64(vec.GetAggregatedSampleCount()), + upperBound: math.Inf(+1), + }) + } + + return bucketQuantile(q, buckets) +} + +// Average computes wrapped histograms' average value. +func (vec HistogramVec) Average() float64 { + return vec.GetAggregatedSampleSum() / float64(vec.GetAggregatedSampleCount()) +} + +// Validate makes sure the wrapped histograms have all necessary fields set and with valid values. +func (vec HistogramVec) Validate() error { + bucketSize := 0 + for i, hist := range vec { + if err := hist.Validate(); err != nil { + return err + } + if i == 0 { + bucketSize = len(hist.GetBucket()) + } else if bucketSize != len(hist.GetBucket()) { + return fmt.Errorf("found different bucket size: expect %v, but got %v at index %v", bucketSize, len(hist.GetBucket()), i) + } + } + return nil +} + +// GetHistogramVecFromGatherer collects a metric, that matches the input labelValue map, +// from a gatherer implementing k8s.io/component-base/metrics.Gatherer interface. // Used only for testing purposes where we need to gather metrics directly from a running binary (without metrics endpoint). -func GetHistogramFromGatherer(gatherer metrics.Gatherer, metricName string) (Histogram, error) { +func GetHistogramVecFromGatherer(gatherer metrics.Gatherer, metricName string, lvMap map[string]string) (HistogramVec, error) { var metricFamily *dto.MetricFamily m, err := gatherer.Gather() if err != nil { - return Histogram{}, err + return nil, err } for _, mFamily := range m { if mFamily.GetName() == metricName { @@ -192,23 +266,22 @@ func GetHistogramFromGatherer(gatherer metrics.Gatherer, metricName string) (His } if metricFamily == nil { - return Histogram{}, fmt.Errorf("metric %q not found", metricName) - } - - if metricFamily.GetMetric() == nil { - return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + return nil, fmt.Errorf("metric %q not found", metricName) } if len(metricFamily.GetMetric()) == 0 { - return Histogram{}, fmt.Errorf("metric %q is empty", metricName) + return nil, fmt.Errorf("metric %q is empty", metricName) } - return Histogram{ - // Histograms are stored under the first index (based on observation). - // Given there's only one histogram registered per each metric name, accessing - // the first index is sufficient. - metricFamily.GetMetric()[0].GetHistogram(), - }, nil + vec := make(HistogramVec, 0) + for _, metric := range metricFamily.GetMetric() { + if LabelsMatch(metric, lvMap) { + if hist := metric.GetHistogram(); hist != nil { + vec = append(vec, &Histogram{hist}) + } + } + } + return vec, nil } func uint64Ptr(u uint64) *uint64 { @@ -266,7 +339,7 @@ func (hist *Histogram) Quantile(q float64) float64 { if len(buckets) == 0 || buckets[len(buckets)-1].upperBound != math.Inf(+1) { // The list of buckets in dto.Histogram doesn't include the final +Inf bucket, so we - // add it here for the reset of the samples. + // add it here for the rest of the samples. 
buckets = append(buckets, bucket{ count: float64(hist.GetSampleCount()), upperBound: math.Inf(+1), diff --git a/vendor/k8s.io/component-base/version/OWNERS b/vendor/k8s.io/component-base/version/OWNERS new file mode 100644 index 000000000..329ea1474 --- /dev/null +++ b/vendor/k8s.io/component-base/version/OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Currently assigned this directory to sig-api-machinery since this is +# an interface to the version definition in "k8s.io/apimachinery/pkg/version", +# and also to sig-release since this version information is set up for +# each release. + +approvers: +- sig-api-machinery-api-approvers +- release-engineering-approvers +reviewers: +- sig-api-machinery-api-reviewers +- release-managers + +labels: +- sig/api-machinery +- sig/release diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go index 68b81d47e..fe8270a9c 100644 --- a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go +++ b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go @@ -864,6 +864,8 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do("in, out := &in.$.name$, &out.$.name$\n", args) g.generateFor(ft, sw) sw.Do("}\n", nil) + case uft.Kind == types.Array: + sw.Do("out.$.name$ = in.$.name$\n", args) case uft.Kind == types.Struct: if ft.IsAssignable() { sw.Do("out.$.name$ = in.$.name$\n", args) diff --git a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go index 72782b166..220ec7e0d 100644 --- a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go +++ b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go @@ -318,9 +318,18 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } if len(inputTags) == 1 { var err error - typesPkg, err = context.AddDirectory(filepath.Join(pkg.Path, inputTags[0])) + + inputPath := inputTags[0] + if strings.HasPrefix(inputPath, "./") || strings.HasPrefix(inputPath, "../") { + // this is a relative dir, which will not work under gomodules. + // join with the local package path, but warn + klog.Warningf("relative path %s=%s will not work under gomodule mode; use full package path (as used by 'import') instead", inputTagName, inputPath) + inputPath = filepath.Join(pkg.Path, inputTags[0]) + } + + typesPkg, err = context.AddDirectory(inputPath) if err != nil { - klog.Fatalf("cannot import package %s", inputTags[0]) + klog.Fatalf("cannot import package %s", inputPath) } // update context.Order to the latest context.Universe orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} diff --git a/vendor/k8s.io/gengo/namer/namer.go b/vendor/k8s.io/gengo/namer/namer.go index d700a00a5..6feb2d0c4 100644 --- a/vendor/k8s.io/gengo/namer/namer.go +++ b/vendor/k8s.io/gengo/namer/namer.go @@ -17,7 +17,9 @@ limitations under the License. 
package namer import ( + "fmt" "path/filepath" + "strconv" "strings" "k8s.io/gengo/types" @@ -246,6 +248,12 @@ func (ns *NameStrategy) Name(t *types.Type) string { "Slice", ns.removePrefixAndSuffix(ns.Name(t.Elem)), }, ns.Suffix) + case types.Array: + name = ns.Join(ns.Prefix, []string{ + "Array", + ns.removePrefixAndSuffix(fmt.Sprintf("%d", t.Len)), + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) case types.Pointer: name = ns.Join(ns.Prefix, []string{ "Pointer", @@ -340,6 +348,9 @@ func (r *rawNamer) Name(t *types.Type) string { name = "map[" + r.Name(t.Key) + "]" + r.Name(t.Elem) case types.Slice: name = "[]" + r.Name(t.Elem) + case types.Array: + l := strconv.Itoa(int(t.Len)) + name = "[" + l + "]" + r.Name(t.Elem) case types.Pointer: name = "*" + r.Name(t.Elem) case types.Struct: diff --git a/vendor/k8s.io/gengo/parser/parse.go b/vendor/k8s.io/gengo/parser/parse.go index 635afcf28..311d5dfb5 100644 --- a/vendor/k8s.io/gengo/parser/parse.go +++ b/vendor/k8s.io/gengo/parser/parse.go @@ -723,8 +723,7 @@ func (b *Builder) walkType(u types.Universe, useName *types.Name, in tc.Type) *t } out.Kind = types.Array out.Elem = b.walkType(u, nil, t.Elem()) - // TODO: need to store array length, otherwise raw type name - // cannot be properly written. + out.Len = in.(*tc.Array).Len() return out case *tc.Chan: out := u.Type(name) diff --git a/vendor/k8s.io/gengo/types/types.go b/vendor/k8s.io/gengo/types/types.go index 12ec1d975..124766e76 100644 --- a/vendor/k8s.io/gengo/types/types.go +++ b/vendor/k8s.io/gengo/types/types.go @@ -83,11 +83,13 @@ const ( // Interface is any type that could have differing types at run time. Interface Kind = "Interface" + // Array is just like slice, but has a fixed length. + Array Kind = "Array" + // The remaining types are included for completeness, but are not well // supported. - Array Kind = "Array" // Array is just like slice, but has a fixed length. - Chan Kind = "Chan" - Func Kind = "Func" + Chan Kind = "Chan" + Func Kind = "Func" // DeclarationOf is different from other Kinds; it indicates that instead of // representing an actual Type, the type is a declaration of an instance of @@ -350,7 +352,9 @@ type Type struct { // TODO: Add: // * channel direction - // * array length + + // If Kind == Array + Len int64 } // String returns the name of the type. 
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 380e514f2..ad5063fdf 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -15,5 +15,5 @@ approvers: - tallclair - piosz - brancz - - DirectXMan12 - lavalamp + - serathius diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 1e187f763..45efbb075 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -509,7 +509,7 @@ type loggingT struct { addDirHeader bool // If set, all output will be redirected unconditionally to the provided logr.Logger - logr logr.Logger + logr *logr.Logger // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -698,11 +698,11 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logr != nil { + // if logger is set, we clear the generated header as we rely on the backing + // logger implementation to print headers + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -710,18 +710,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logr, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { - l.printDepth(s, logr, filter, 1, args...) +func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -732,14 +732,14 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, de if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, depth, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -750,17 +750,17 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, 0 /* depth */, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. 
If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { + if logger != nil { l.putBuffer(buf) buf = l.getBuffer() } @@ -771,28 +771,28 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, 2 /* depth */, file, line, alsoToStderr) + l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - logr.WithCallDepth(loggr, depth+2).Error(err, msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - logr.WithCallDepth(loggr, depth+2).Info(msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } l.printS(nil, infoLog, depth+1, msg, keysAndValues...) @@ -862,11 +862,23 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // Use as: // ... // klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. func SetLogger(logr logr.Logger) { logging.mu.Lock() defer logging.mu.Unlock() - logging.logr = logr + logging.logr = &logr +} + +// ClearLogger removes a backing logr implementation if one was set earlier +// with SetLogger. +func ClearLogger() { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.logr = nil } // SetOutput sets the output destination for all severities @@ -904,7 +916,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { @@ -916,9 +928,9 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, f // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == errorLog { - logr.WithCallDepth(l.logr, depth+3).Error(nil, string(data)) + l.logr.WithCallDepth(depth+3).Error(nil, string(data)) } else { - logr.WithCallDepth(log, depth+3).Info(string(data)) + log.WithCallDepth(depth + 3).Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) @@ -1269,7 +1281,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr logr.Logger + logr *logr.Logger filter LogFilter } @@ -1277,7 +1289,8 @@ func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level)), logging.filter} + v := logging.logr.V(int(level)) + return Verbose{b, &v, logging.filter} } // V reports whether verbosity at the call site is at least the requested level. @@ -1315,9 +1328,14 @@ func V(level Level) Verbose { if runtime.Callers(2, logging.pcs[:]) == 0 { return newVerbose(level, false) } - v, ok := logging.vmap[logging.pcs[0]] + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := logging.pcs[0] - 1 + v, ok := logging.vmap[pc] if !ok { - v = logging.setV(logging.pcs[0]) + v = logging.setV(pc) } return newVerbose(level, v >= level) } @@ -1573,6 +1591,15 @@ func (ref ObjectRef) String() string { return ref.Name } +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + // KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface // this interface may expand in the future, but will always be a subset of the // kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface @@ -1603,3 +1630,20 @@ func KRef(namespace, name string) ObjectRef { Namespace: namespace, } } + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go new file mode 100644 index 000000000..6d716ed58 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -0,0 +1,410 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder3 + +import ( + "encoding/json" + "fmt" + restful "github.com/emicklei/go-restful" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/util" + "k8s.io/kube-openapi/pkg/validation/spec" + "net/http" + "strings" +) + +const ( + OpenAPIVersion = "3.0" +) + +type openAPI struct { + config *common.Config + spec *spec3.OpenAPI + definitions map[string]common.OpenAPIDefinition +} + +func groupRoutesByPath(routes []restful.Route) map[string][]restful.Route { + pathToRoutes := make(map[string][]restful.Route) + for _, r := range routes { + pathToRoutes[r.Path] = append(pathToRoutes[r.Path], r) + } + return pathToRoutes +} + +func (o *openAPI) buildResponse(model interface{}, description string, content []string) (*spec3.Response, error) { + response := &spec3.Response{ + ResponseProps: spec3.ResponseProps{ + Description: description, + Content: make(map[string]*spec3.MediaType), + }, + } + + s, err := o.toSchema(util.GetCanonicalTypeName(model)) + if err != nil { + return nil, err + } + + for _, contentType := range content { + response.ResponseProps.Content[contentType] = &spec3.MediaType{ + MediaTypeProps: spec3.MediaTypeProps{ + Schema: s, + }, + } + } + return response, nil +} + +func (o *openAPI) buildOperations(route restful.Route, inPathCommonParamsMap map[interface{}]*spec3.Parameter) (*spec3.Operation, error) { + ret := &spec3.Operation{ + OperationProps: spec3.OperationProps{ + Description: route.Doc, + Responses: &spec3.Responses{ + ResponsesProps: spec3.ResponsesProps{ + StatusCodeResponses: make(map[int]*spec3.Response), + }, + }, + }, + } + var err error + if ret.OperationId, ret.Tags, err = o.config.GetOperationIDAndTags(&route); err != nil { + return ret, err + } + + // Build responses + for _, resp := range route.ResponseErrors { + ret.Responses.StatusCodeResponses[resp.Code], err = o.buildResponse(resp.Model, resp.Message, route.Produces) + if err != nil { + return ret, err + } + } + + // If there is no response but a write sample, assume that write sample is an http.StatusOK response. + if len(ret.Responses.StatusCodeResponses) == 0 && route.WriteSample != nil { + ret.Responses.StatusCodeResponses[http.StatusOK], err = o.buildResponse(route.WriteSample, "OK", route.Produces) + if err != nil { + return ret, err + } + } + + // TODO: Default response if needed. 
Common Response config + + ret.Parameters = make([]*spec3.Parameter, 0) + for _, param := range route.ParameterDocs { + _, isCommon := inPathCommonParamsMap[mapKeyFromParam(param)] + if !isCommon && param.Data().Kind != restful.BodyParameterKind { + openAPIParam, err := o.buildParameter(param.Data()) + if err != nil { + return ret, err + } + ret.Parameters = append(ret.Parameters, openAPIParam) + } + } + + body, err := o.buildRequestBody(route.ParameterDocs, route.ReadSample) + if err != nil { + return nil ,err + } + + if body != nil { + ret.RequestBody = body + } + return ret, nil +} + +func (o *openAPI) buildRequestBody(parameters []*restful.Parameter, bodySample interface{}) (*spec3.RequestBody, error) { + for _, param := range parameters { + if param.Data().Kind == restful.BodyParameterKind && bodySample != nil { + schema, err := o.toSchema(util.GetCanonicalTypeName(bodySample)) + if err != nil { + return nil, err + } + r := &spec3.RequestBody{ + RequestBodyProps: spec3.RequestBodyProps{ + Content: map[string]*spec3.MediaType{ + "application/json": &spec3.MediaType{ + MediaTypeProps: spec3.MediaTypeProps{ + Schema: schema, + }, + }, + }, + }, + } + return r, nil + } + } + return nil, nil +} + +func newOpenAPI(config *common.Config) openAPI { + o := openAPI{ + config: config, + spec: &spec3.OpenAPI{ + Version: "3.0.0", + Info: config.Info, + Paths: &spec3.Paths{ + Paths: map[string]*spec3.Path{}, + }, + Components: &spec3.Components{ + Schemas: map[string]*spec.Schema{}, + }, + }, + } + if o.config.GetOperationIDAndTags == nil { + o.config.GetOperationIDAndTags = func(r *restful.Route) (string, []string, error) { + return r.Operation, nil, nil + } + } + + if o.config.GetDefinitionName == nil { + o.config.GetDefinitionName = func(name string) (string, spec.Extensions) { + return name[strings.LastIndex(name, "/")+1:], nil + } + } + + o.definitions = o.config.GetDefinitions(func(name string) spec.Ref { + defName, _ := o.config.GetDefinitionName(name) + return spec.MustCreateRef("#/components/schemas/" + common.EscapeJsonPointer(defName)) + }) + + return o +} + +func (o *openAPI) buildOpenAPISpec(webServices []*restful.WebService) error { + pathsToIgnore := util.NewTrie(o.config.IgnorePrefixes) + for _, w := range webServices { + rootPath := w.RootPath() + if pathsToIgnore.HasPrefix(rootPath) { + continue + } + + commonParams, err := o.buildParameters(w.PathParameters()) + if err != nil { + return err + } + + for path, routes := range groupRoutesByPath(w.Routes()) { + // go-swagger has special variable definition {$NAME:*} that can only be + // used at the end of the path and it is not recognized by OpenAPI. + if strings.HasSuffix(path, ":*}") { + path = path[:len(path)-3] + "}" + } + if pathsToIgnore.HasPrefix(path) { + continue + } + + // Aggregating common parameters make API spec (and generated clients) simpler + inPathCommonParamsMap, err := o.findCommonParameters(routes) + if err != nil { + return err + } + pathItem, exists := o.spec.Paths.Paths[path] + if exists { + return fmt.Errorf("duplicate webservice route has been found for path: %v", path) + } + + pathItem = &spec3.Path{ + PathProps: spec3.PathProps{ + Parameters: make([]*spec3.Parameter, 0), + }, + } + + // add web services's parameters as well as any parameters appears in all ops, as common parameters + pathItem.Parameters = append(pathItem.Parameters, commonParams...) 
+ for _, p := range inPathCommonParamsMap { + pathItem.Parameters = append(pathItem.Parameters, p) + } + sortParameters(pathItem.Parameters) + + for _, route := range routes { + op, _ := o.buildOperations(route, inPathCommonParamsMap) + + switch strings.ToUpper(route.Method) { + case "GET": + pathItem.Get = op + case "POST": + pathItem.Post = op + case "HEAD": + pathItem.Head = op + case "PUT": + pathItem.Put = op + case "DELETE": + pathItem.Delete = op + case "OPTIONS": + pathItem.Options = op + case "PATCH": + pathItem.Patch = op + } + + } + o.spec.Paths.Paths[path] = pathItem + } + } + return nil +} + +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) { + a := newOpenAPI(config) + err := a.buildOpenAPISpec(webServices) + if err != nil { + return nil, err + } + return a.spec, nil +} + +func (o *openAPI) findCommonParameters(routes []restful.Route) (map[interface{}]*spec3.Parameter, error) { + commonParamsMap := make(map[interface{}]*spec3.Parameter, 0) + paramOpsCountByName := make(map[interface{}]int, 0) + paramNameKindToDataMap := make(map[interface{}]restful.ParameterData, 0) + for _, route := range routes { + routeParamDuplicateMap := make(map[interface{}]bool) + s := "" + for _, param := range route.ParameterDocs { + m, _ := json.Marshal(param.Data()) + s += string(m) + "\n" + key := mapKeyFromParam(param) + if routeParamDuplicateMap[key] { + msg, _ := json.Marshal(route.ParameterDocs) + return commonParamsMap, fmt.Errorf("duplicate parameter %v for route %v, %v", param.Data().Name, string(msg), s) + } + routeParamDuplicateMap[key] = true + paramOpsCountByName[key]++ + paramNameKindToDataMap[key] = param.Data() + } + } + for key, count := range paramOpsCountByName { + paramData := paramNameKindToDataMap[key] + if count == len(routes) && paramData.Kind != restful.BodyParameterKind { + openAPIParam, err := o.buildParameter(paramData) + if err != nil { + return commonParamsMap, err + } + commonParamsMap[key] = openAPIParam + } + } + return commonParamsMap, nil +} + +func (o *openAPI) buildParameters(restParam []*restful.Parameter) (ret []*spec3.Parameter, err error) { + ret = make([]*spec3.Parameter, len(restParam)) + for i, v := range restParam { + ret[i], err = o.buildParameter(v.Data()) + if err != nil { + return ret, err + } + } + return ret, nil +} + +func (o *openAPI) buildParameter(restParam restful.ParameterData) (ret *spec3.Parameter, err error) { + ret = &spec3.Parameter{ + ParameterProps: spec3.ParameterProps{ + Name: restParam.Name, + Description: restParam.Description, + Required: restParam.Required, + }, + } + switch restParam.Kind { + case restful.BodyParameterKind: + return nil, nil + case restful.PathParameterKind: + ret.In = "path" + if !restParam.Required { + return ret, fmt.Errorf("path parameters should be marked at required for parameter %v", restParam) + } + case restful.QueryParameterKind: + ret.In = "query" + case restful.HeaderParameterKind: + ret.In = "header" + /* TODO: add support for the cookie param */ + default: + return ret, fmt.Errorf("unsupported restful parameter kind : %v", restParam.Kind) + } + openAPIType, openAPIFormat := common.OpenAPITypeFormat(restParam.DataType) + if openAPIType == "" { + return ret, fmt.Errorf("non-body Restful parameter type should be a simple type, but got : %v", restParam.DataType) + } + + ret.Schema = &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{openAPIType}, + Format: openAPIFormat, + UniqueItems: !restParam.AllowMultiple, + }, + } + return ret, 
nil +} + +func (o *openAPI) buildDefinitionRecursively(name string) error { + uniqueName, extensions := o.config.GetDefinitionName(name) + if _, ok := o.spec.Components.Schemas[uniqueName]; ok { + return nil + } + if item, ok := o.definitions[name]; ok { + schema := &spec.Schema{ + VendorExtensible: item.Schema.VendorExtensible, + SchemaProps: item.Schema.SchemaProps, + SwaggerSchemaProps: item.Schema.SwaggerSchemaProps, + } + if extensions != nil { + if schema.Extensions == nil { + schema.Extensions = spec.Extensions{} + } + for k, v := range extensions { + schema.Extensions[k] = v + } + } + o.spec.Components.Schemas[uniqueName] = schema + for _, v := range item.Dependencies { + if err := o.buildDefinitionRecursively(v); err != nil { + return err + } + } + } else { + return fmt.Errorf("cannot find model definition for %v. If you added a new type, you may need to add +k8s:openapi-gen=true to the package or type and run code-gen again", name) + } + return nil +} + +func (o *openAPI) buildDefinitionForType(name string) (string, error) { + if err := o.buildDefinitionRecursively(name); err != nil { + return "", err + } + defName, _ := o.config.GetDefinitionName(name) + return "#/components/schemas/" + common.EscapeJsonPointer(defName), nil +} + +func (o *openAPI) toSchema(name string) (_ *spec.Schema, err error) { + if openAPIType, openAPIFormat := common.OpenAPITypeFormat(name); openAPIType != "" { + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{openAPIType}, + Format: openAPIFormat, + }, + }, nil + } else { + ref, err := o.buildDefinitionForType(name) + if err != nil { + return nil, err + } + return &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: spec.MustCreateRef(ref), + }, + }, nil + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/util.go b/vendor/k8s.io/kube-openapi/pkg/builder3/util.go new file mode 100644 index 000000000..d977bdf90 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/util.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder3 + +import ( + "sort" + + "github.com/emicklei/go-restful" + "k8s.io/kube-openapi/pkg/spec3" +) + +func mapKeyFromParam(param *restful.Parameter) interface{} { + return struct { + Name string + Kind int + }{ + Name: param.Data().Name, + Kind: param.Data().Kind, + } +} + +func (s parameters) Len() int { return len(s) } +func (s parameters) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type parameters []*spec3.Parameter + +type byNameIn struct { + parameters +} + +func (s byNameIn) Less(i, j int) bool { + return s.parameters[i].Name < s.parameters[j].Name || (s.parameters[i].Name == s.parameters[j].Name && s.parameters[i].In < s.parameters[j].In) +} + +// SortParameters sorts parameters by Name and In fields. 
+func sortParameters(p []*spec3.Parameter) { + sort.Sort(byNameIn{p}) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 682014bda..cf1d01f90 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -57,6 +57,11 @@ type PathHandler interface { Handle(path string, handler http.Handler) } +type PathHandlerByGroupVersion interface { + Handle(path string, handler http.Handler) + HandlePrefix(path string, handler http.Handler) +} + // Config is set of configuration for openAPI spec generation. type Config struct { // List of supported protocols such as https, http, etc. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/enum.go b/vendor/k8s.io/kube-openapi/pkg/generators/enum.go new file mode 100644 index 000000000..8b1994fec --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/enum.go @@ -0,0 +1,152 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" +) + +const tagEnumType = "enum" +const enumTypeDescriptionHeader = "Possible enum values:" + +type enumValue struct { + Name string + Value string + Comment string +} + +type enumType struct { + Name types.Name + Values []*enumValue +} + +// enumMap is a map from the name to the matching enum type. +type enumMap map[types.Name]*enumType + +type enumContext struct { + enumTypes enumMap +} + +func newEnumContext(c *generator.Context) *enumContext { + return &enumContext{enumTypes: parseEnums(c)} +} + +// EnumType checks and finds the enumType for a given type. +// If the given type is a known enum type, returns the enumType, true +// Otherwise, returns nil, false +func (ec *enumContext) EnumType(t *types.Type) (enum *enumType, isEnum bool) { + enum, ok := ec.enumTypes[t.Name] + return enum, ok +} + +// ValueStrings returns all possible values of the enum type as strings +// the results are sorted and quoted as Go literals. +func (et *enumType) ValueStrings() []string { + var values []string + for _, value := range et.Values { + // use "%q" format to generate a Go literal of the string const value + values = append(values, fmt.Sprintf("%q", value.Value)) + } + sort.Strings(values) + return values +} + +// DescriptionLines returns a description of the enum in this format: +// +// Possible enum values: +// - `"value1"` description 1 +// - `"value2"` description 2 +func (et *enumType) DescriptionLines() []string { + var lines []string + for _, value := range et.Values { + lines = append(lines, value.Description()) + } + sort.Strings(lines) + // Prepend an empty string to initiate a new paragraph. + return append([]string{"", enumTypeDescriptionHeader}, lines...) 
+} + +func parseEnums(c *generator.Context) enumMap { + // First, find the builtin "string" type + stringType := c.Universe.Type(types.Name{Name: "string"}) + + enumTypes := make(enumMap) + for _, p := range c.Universe { + // find all enum types. + for _, t := range p.Types { + if isEnumType(stringType, t) { + if _, ok := enumTypes[t.Name]; !ok { + enumTypes[t.Name] = &enumType{ + Name: t.Name, + } + } + } + } + // find all enum values from constants, and try to match each with its type. + for _, c := range p.Constants { + enumType := c.Underlying + if _, ok := enumTypes[enumType.Name]; ok { + value := &enumValue{ + Name: c.Name.Name, + Value: *c.ConstValue, + Comment: strings.Join(c.CommentLines, " "), + } + enumTypes[enumType.Name].appendValue(value) + } + } + } + + return enumTypes +} + +func (et *enumType) appendValue(value *enumValue) { + et.Values = append(et.Values, value) +} + +// Description returns the description line for the enumValue +// with the format: +// - `"FooValue"` is the Foo value +func (ev *enumValue) Description() string { + comment := strings.TrimSpace(ev.Comment) + // The comment should starts with the type name, trim it first. + comment = strings.TrimPrefix(comment, ev.Name) + // Trim the possible space after previous step. + comment = strings.TrimSpace(comment) + // The comment may be multiline, cascade all consecutive whitespaces. + comment = whitespaceRegex.ReplaceAllString(comment, " ") + return fmt.Sprintf(" - `%q` %s", ev.Value, comment) +} + +// isEnumType checks if a given type is an enum by the definition +// An enum type should be an alias of string and has tag '+enum' in its comment. +// Additionally, pass the type of builtin 'string' to check against. +func isEnumType(stringType *types.Type, t *types.Type) bool { + return t.Kind == types.Alias && t.Underlying == stringType && hasEnumTag(t) +} + +func hasEnumTag(t *types.Type) bool { + return types.ExtractCommentTags("+", t.CommentLines)[tagEnumType] != nil +} + +// whitespaceRegex is the regex for consecutive whitespaces. +var whitespaceRegex = regexp.MustCompile(`\s+`) diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go index 15a911288..e37d93ef7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -66,6 +66,10 @@ var tagToExtension = map[string]extensionAttributes{ kind: types.Struct, allowedValues: sets.NewString("atomic", "granular"), }, + "validations": { + xName: "x-kubernetes-validations", + kind: types.Slice, + }, } // Extension encapsulates information necessary to generate an OpenAPI extension. 
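
Note on the new enum support above: the generator is driven purely by comment tags, so nothing outside the tagged package has to change. A minimal sketch of a type that isEnumType/parseEnums would pick up follows; the type and constant names are illustrative only and are not part of this change.

// +enum
type RestartPolicy string

const (
	// RestartPolicyAlways always restarts the container after termination.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyNever never restarts the container after termination.
	RestartPolicyNever RestartPolicy = "Never"
)

For a struct field of this type, generateProperty would append the "Possible enum values:" lines (one per constant, built from the comments above with the constant name trimmed) to the field description and emit Enum: []interface{}{"Always", "Never"} in the generated schema.
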
diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index a294c2e8a..5d0349ccb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -225,6 +225,7 @@ type openAPITypeWriter struct { *generator.SnippetWriter context *generator.Context refTypes map[string]*types.Type + enumContext *enumContext GetDefinitionInterface *types.Type } @@ -233,6 +234,7 @@ func newOpenAPITypeWriter(sw *generator.SnippetWriter, c *generator.Context) ope SnippetWriter: sw, context: c, refTypes: map[string]*types.Type{}, + enumContext: newEnumContext(c), } } @@ -548,7 +550,7 @@ func mustEnforceDefault(t *types.Type, omitEmpty bool) (interface{}, error) { } func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omitEmpty bool) error { - t = resolveAliasType(t) + t = resolveAliasAndEmbeddedType(t) def, err := defaultFromComments(comments) if err != nil { return err @@ -625,7 +627,11 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) return err } g.Do("SchemaProps: spec.SchemaProps{\n", nil) - g.generateDescription(m.CommentLines) + var extraComments []string + if enumType, isEnum := g.enumContext.EnumType(m.Type); isEnum { + extraComments = enumType.DescriptionLines() + } + g.generateDescription(append(m.CommentLines, extraComments...)) jsonTags := getJsonTags(m) if len(jsonTags) > 1 && jsonTags[1] == "string" { g.generateSimpleProperty("string", "") @@ -641,6 +647,10 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) typeString, format := openapi.OpenAPITypeFormat(t.String()) if typeString != "" { g.generateSimpleProperty(typeString, format) + if enumType, isEnum := g.enumContext.EnumType(m.Type); isEnum { + // original type is an enum, add "Enum: " and the values + g.Do("Enum: []interface{}{$.$}", strings.Join(enumType.ValueStrings(), ", ")) + } g.Do("},\n},\n", nil) return nil } @@ -674,13 +684,18 @@ func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) { g.Do("Ref: ref(\"$.$\"),\n", t.Name.String()) } -func resolveAliasType(t *types.Type) *types.Type { +func resolveAliasAndEmbeddedType(t *types.Type) *types.Type { var prev *types.Type for prev != t { prev = t if t.Kind == types.Alias { t = t.Underlying } + if t.Kind == types.Struct { + if len(t.Members) == 1 && t.Members[0].Embedded { + t = t.Members[0].Type + } + } } return t } diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 7de4eba76..7338094cb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -20,6 +20,7 @@ import ( "bytes" "compress/gzip" "crypto/sha512" + "encoding/json" "fmt" "mime" "net/http" @@ -30,9 +31,9 @@ import ( "github.com/emicklei/go-restful" "github.com/golang/protobuf/proto" openapi_v2 "github.com/googleapis/gnostic/openapiv2" - jsoniter "github.com/json-iterator/go" "github.com/munnerz/goautoneg" "gopkg.in/yaml.v2" + klog "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/builder" "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/validation/spec" @@ -55,13 +56,40 @@ type OpenAPIService struct { lastModified time.Time - specBytes []byte - specPb []byte - specPbGz []byte + jsonCache cache + protoCache cache +} - specBytesETag string - specPbETag string - specPbGzETag string +type cache struct { + BuildCache func() ([]byte, error) + once sync.Once + bytes []byte + etag 
string + err error +} + +func (c *cache) Get() ([]byte, string, error) { + c.once.Do(func() { + bytes, err := c.BuildCache() + // if there is an error updating the cache, there can be situations where + // c.bytes contains a valid value (carried over from the previous update) + // but c.err is also not nil; the cache user is expected to check for this + c.err = err + if c.err == nil { + // don't override previous spec if we had an error + c.bytes = bytes + c.etag = computeETag(c.bytes) + } + }) + return c.bytes, c.etag, c.err +} + +func (c *cache) New(cacheBuilder func() ([]byte, error)) cache { + return cache{ + bytes: c.bytes, + etag: c.etag, + BuildCache: cacheBuilder, + } } func init() { @@ -71,6 +99,9 @@ func init() { } func computeETag(data []byte) string { + if data == nil { + return "" + } return fmt.Sprintf("\"%X\"", sha512.Sum512(data)) } @@ -83,51 +114,40 @@ func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { return o, nil } -func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time) { +func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time, error) { o.rwMutex.RLock() defer o.rwMutex.RUnlock() - return o.specBytes, o.specBytesETag, o.lastModified + specBytes, etag, err := o.jsonCache.Get() + if err != nil { + return nil, "", time.Time{}, err + } + return specBytes, etag, o.lastModified, nil } -func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time) { +func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time, error) { o.rwMutex.RLock() defer o.rwMutex.RUnlock() - return o.specPb, o.specPbETag, o.lastModified -} - -func (o *OpenAPIService) getSwaggerPbGzBytes() ([]byte, string, time.Time) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - return o.specPbGz, o.specPbGzETag, o.lastModified + specPb, etag, err := o.protoCache.Get() + if err != nil { + return nil, "", time.Time{}, err + } + return specPb, etag, o.lastModified, nil } func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) { - specBytes, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(openapiSpec) - if err != nil { - return err - } - specPb, err := ToProtoBinary(specBytes) - if err != nil { - return err - } - specPbGz := toGzip(specPb) - - specBytesETag := computeETag(specBytes) - specPbETag := computeETag(specPb) - specPbGzETag := computeETag(specPbGz) - - lastModified := time.Now() - o.rwMutex.Lock() defer o.rwMutex.Unlock() - - o.specBytes = specBytes - o.specPb = specPb - o.specPbGz = specPbGz - o.specBytesETag = specBytesETag - o.specPbETag = specPbETag - o.specPbGzETag = specPbGzETag - o.lastModified = lastModified + o.jsonCache = o.jsonCache.New(func() ([]byte, error) { + return json.Marshal(openapiSpec) + }) + o.protoCache = o.protoCache.New(func() ([]byte, error) { + json, _, err := o.jsonCache.Get() + if err != nil { + return nil, err + } + return ToProtoBinary(json) + }) + o.lastModified = time.Now() return nil } @@ -206,7 +226,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl accepted := []struct { Type string SubType string - GetDataAndETag func() ([]byte, string, time.Time) + GetDataAndETag func() ([]byte, string, time.Time, error) }{ {"application", "json", o.getSwaggerBytes}, {"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes}, @@ -230,7 +250,15 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl } // serve the first matching media type in the sorted clause list - data, etag, lastModified := 
accepts.GetDataAndETag() + data, etag, lastModified, err := accepts.GetDataAndETag() + if err != nil { + klog.Errorf("Error in OpenAPI handler: %s", err) + // only return a 503 if we have no older cache data to serve + if data == nil { + w.WriteHeader(http.StatusServiceUnavailable) + return + } + } w.Header().Set("Etag", etag) // ServeContent will take care of caching using eTag. http.ServeContent(w, r, servePath, lastModified, bytes.NewReader(data)) diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go new file mode 100644 index 000000000..aa3290823 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -0,0 +1,248 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler3 + +import ( + "bytes" + "compress/gzip" + "crypto/sha512" + "encoding/json" + "fmt" + "mime" + "net/http" + "sort" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + openapi_v3 "github.com/googleapis/gnostic/openapiv3" + "github.com/munnerz/goautoneg" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +const ( + jsonExt = ".json" + + mimeJson = "application/json" + // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. + mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" + mimePbGz = "application/x-gzip" + + subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeJSON = "json" +) + +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +// OpenAPI V3 currently does not use the lazy marshaling strategy that OpenAPI V2 is using +type OpenAPIService struct { + // rwMutex protects All members of this service. + rwMutex sync.RWMutex + lastModified time.Time + v3Schema map[string]*OpenAPIV3Group +} + +type OpenAPIV3Group struct { + rwMutex sync.RWMutex + + lastModified time.Time + + specBytes []byte + specPb []byte + specPbGz []byte + + specBytesETag string + specPbETag string + specPbGzETag string +} + +func init() { + mime.AddExtensionType(".json", mimeJson) + mime.AddExtensionType(".pb-v1", mimePb) + mime.AddExtensionType(".gz", mimePbGz) +} + +func computeETag(data []byte) string { + return fmt.Sprintf("\"%X\"", sha512.Sum512(data)) +} + +// NewOpenAPIService builds an OpenAPIService starting with the given spec. 
+func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { + o := &OpenAPIService{} + o.v3Schema = make(map[string]*OpenAPIV3Group) + return o, nil +} + +func (o *OpenAPIService) getGroupBytes() ([]byte, error) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + keys := make([]string, len(o.v3Schema)) + i := 0 + for k := range o.v3Schema { + keys[i] = k + i++ + } + + sort.Strings(keys) + group := make(map[string][]string) + group["Paths"] = keys + + j, err := json.Marshal(group) + if err != nil { + return nil, err + } + return j, nil +} + +func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { + o.rwMutex.RLock() + defer o.rwMutex.RUnlock() + v, ok := o.v3Schema[group] + if !ok { + return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) + } + if getType == subTypeJSON { + return v.specBytes, v.specBytesETag, v.lastModified, nil + } else if getType == subTypeProtobuf { + return v.specPb, v.specPbETag, v.lastModified, nil + } + return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) +} + +func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) (err error) { + o.rwMutex.Lock() + defer o.rwMutex.Unlock() + + specBytes, err := json.Marshal(openapi) + if err != nil { + return err + } + + if _, ok := o.v3Schema[group]; !ok { + o.v3Schema[group] = &OpenAPIV3Group{} + } + return o.v3Schema[group].UpdateSpec(specBytes) +} + +func (o *OpenAPIService) DeleteGroupVersion(group string) { + o.rwMutex.Lock() + defer o.rwMutex.Unlock() + delete(o.v3Schema, group) +} + +func ToV3ProtoBinary(json []byte) ([]byte, error) { + document, err := openapi_v3.ParseDocument(json) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +func toGzip(data []byte) []byte { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + zw.Write(data) + zw.Close() + return buf.Bytes() +} + +func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { + data, _ := o.getGroupBytes() + http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) +} + +func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { + url := strings.SplitAfterN(r.URL.Path, "/", 4) + group := url[3] + + decipherableFormats := r.Header.Get("Accept") + if decipherableFormats == "" { + decipherableFormats = "*/*" + } + clauses := goautoneg.ParseAccept(decipherableFormats) + w.Header().Add("Vary", "Accept") + + if len(clauses) == 0 { + return + } + + accepted := []struct { + Type string + SubType string + }{ + {"application", subTypeJSON}, + {"application", subTypeProtobuf}, + } + + for _, clause := range clauses { + for _, accepts := range accepted { + if clause.Type != accepts.Type && clause.Type != "*" { + continue + } + if clause.SubType != accepts.SubType && clause.SubType != "*" { + continue + } + data, etag, lastModified, err := o.getSingleGroupBytes(accepts.SubType, group) + if err != nil { + return + } + w.Header().Set("Etag", etag) + http.ServeContent(w, r, "", lastModified, bytes.NewReader(data)) + return + } + } + w.WriteHeader(406) + return +} + +func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, handler common.PathHandlerByGroupVersion) error { + handler.Handle(servePath, http.HandlerFunc(o.HandleDiscovery)) + handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) + return nil +} + +func (o *OpenAPIV3Group) UpdateSpec(specBytes []byte) (err error) { + o.rwMutex.Lock() + defer 
o.rwMutex.Unlock() + + specPb, err := ToV3ProtoBinary(specBytes) + if err != nil { + return err + } + + specPbGz := toGzip(specPb) + + specBytesETag := computeETag(specBytes) + specPbETag := computeETag(specPb) + specPbGzETag := computeETag(specPbGz) + + lastModified := time.Now() + + o.specBytes = specBytes + o.specPb = specPb + o.specPbGz = specPbGz + + o.specBytesETag = specBytesETag + o.specPbETag = specPbETag + o.specPbGzETag = specPbGzETag + + o.lastModified = lastModified + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 99b33d34f..35241fde6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -449,10 +449,7 @@ func (c *convert) VisitPrimitive(p *proto.Primitive) { } func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = untypedDef.Atom - if c.preserveUnknownFields { - *c.top() = deducedDef.Atom - } + *c.top() = deducedDef.Atom } func (c *convert) VisitReference(proto.Reference) { diff --git a/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go b/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go new file mode 100644 index 000000000..3fac658e3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go @@ -0,0 +1,519 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemamutation + +import ( + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Walker runs callback functions on all references of an OpenAPI spec, +// replacing the values when visiting corresponding types. +type Walker struct { + // SchemaCallback will be called on each schema, taking the original schema, + // and before any other callbacks of the Walker. + // If the schema needs to be mutated, DO NOT mutate it in-place, + // always create a copy, mutate, and return it. + SchemaCallback func(schema *spec.Schema) *spec.Schema + + // RefCallback will be called on each ref. + // If the ref needs to be mutated, DO NOT mutate it in-place, + // always create a copy, mutate, and return it. + RefCallback func(ref *spec.Ref) *spec.Ref +} + +type SchemaCallbackFunc func(schema *spec.Schema) *spec.Schema +type RefCallbackFunc func(ref *spec.Ref) *spec.Ref + +var SchemaCallBackNoop SchemaCallbackFunc = func(schema *spec.Schema) *spec.Schema { + return schema +} +var RefCallbackNoop RefCallbackFunc = func(ref *spec.Ref) *spec.Ref { + return ref +} + +// ReplaceReferences rewrites the references without mutating the input. +// The output might share data with the input. 
+func ReplaceReferences(walkRef func(ref *spec.Ref) *spec.Ref, sp *spec.Swagger) *spec.Swagger { + walker := &Walker{RefCallback: walkRef, SchemaCallback: SchemaCallBackNoop} + return walker.WalkRoot(sp) +} + +func (w *Walker) WalkSchema(schema *spec.Schema) *spec.Schema { + if schema == nil { + return nil + } + + orig := schema + clone := func() { + if orig == schema { + schema = &spec.Schema{} + *schema = *orig + } + } + + // Always run callback on the whole schema first + // so that SchemaCallback can take the original schema as input. + schema = w.SchemaCallback(schema) + + if r := w.RefCallback(&schema.Ref); r != &schema.Ref { + clone() + schema.Ref = *r + } + + definitionsCloned := false + for k, v := range schema.Definitions { + if s := w.WalkSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + clone() + schema.Definitions = make(spec.Definitions, len(orig.Definitions)) + for k2, v2 := range orig.Definitions { + schema.Definitions[k2] = v2 + } + } + schema.Definitions[k] = *s + } + } + + propertiesCloned := false + for k, v := range schema.Properties { + if s := w.WalkSchema(&v); s != &v { + if !propertiesCloned { + propertiesCloned = true + clone() + schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) + for k2, v2 := range orig.Properties { + schema.Properties[k2] = v2 + } + } + schema.Properties[k] = *s + } + } + + patternPropertiesCloned := false + for k, v := range schema.PatternProperties { + if s := w.WalkSchema(&v); s != &v { + if !patternPropertiesCloned { + patternPropertiesCloned = true + clone() + schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) + for k2, v2 := range orig.PatternProperties { + schema.PatternProperties[k2] = v2 + } + } + schema.PatternProperties[k] = *s + } + } + + allOfCloned := false + for i := range schema.AllOf { + if s := w.WalkSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { + if !allOfCloned { + allOfCloned = true + clone() + schema.AllOf = make([]spec.Schema, len(orig.AllOf)) + copy(schema.AllOf, orig.AllOf) + } + schema.AllOf[i] = *s + } + } + + anyOfCloned := false + for i := range schema.AnyOf { + if s := w.WalkSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { + if !anyOfCloned { + anyOfCloned = true + clone() + schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) + copy(schema.AnyOf, orig.AnyOf) + } + schema.AnyOf[i] = *s + } + } + + oneOfCloned := false + for i := range schema.OneOf { + if s := w.WalkSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { + if !oneOfCloned { + oneOfCloned = true + clone() + schema.OneOf = make([]spec.Schema, len(orig.OneOf)) + copy(schema.OneOf, orig.OneOf) + } + schema.OneOf[i] = *s + } + } + + if schema.Not != nil { + if s := w.WalkSchema(schema.Not); s != schema.Not { + clone() + schema.Not = s + } + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + if s := w.WalkSchema(schema.AdditionalProperties.Schema); s != schema.AdditionalProperties.Schema { + clone() + schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalProperties.Allows} + } + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + if s := w.WalkSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { + clone() + schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} + } + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + if s := w.WalkSchema(schema.Items.Schema); s != schema.Items.Schema { + clone() + 
schema.Items = &spec.SchemaOrArray{Schema: s} + } + } else { + itemsCloned := false + for i := range schema.Items.Schemas { + if s := w.WalkSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { + if !itemsCloned { + clone() + schema.Items = &spec.SchemaOrArray{ + Schemas: make([]spec.Schema, len(orig.Items.Schemas)), + } + itemsCloned = true + copy(schema.Items.Schemas, orig.Items.Schemas) + } + schema.Items.Schemas[i] = *s + } + } + } + } + + return schema +} + +func (w *Walker) walkParameter(param *spec.Parameter) *spec.Parameter { + if param == nil { + return nil + } + + orig := param + cloned := false + clone := func() { + if !cloned { + cloned = true + param = &spec.Parameter{} + *param = *orig + } + } + + if r := w.RefCallback(¶m.Ref); r != ¶m.Ref { + clone() + param.Ref = *r + } + if s := w.WalkSchema(param.Schema); s != param.Schema { + clone() + param.Schema = s + } + if param.Items != nil { + if r := w.RefCallback(¶m.Items.Ref); r != ¶m.Items.Ref { + param.Items.Ref = *r + } + } + + return param +} + +func (w *Walker) walkParameters(params []spec.Parameter) ([]spec.Parameter, bool) { + if params == nil { + return nil, false + } + + orig := params + cloned := false + clone := func() { + if !cloned { + cloned = true + params = make([]spec.Parameter, len(params)) + copy(params, orig) + } + } + + for i := range params { + if s := w.walkParameter(¶ms[i]); s != ¶ms[i] { + clone() + params[i] = *s + } + } + + return params, cloned +} + +func (w *Walker) walkResponse(resp *spec.Response) *spec.Response { + if resp == nil { + return nil + } + + orig := resp + cloned := false + clone := func() { + if !cloned { + cloned = true + resp = &spec.Response{} + *resp = *orig + } + } + + if r := w.RefCallback(&resp.Ref); r != &resp.Ref { + clone() + resp.Ref = *r + } + if s := w.WalkSchema(resp.Schema); s != resp.Schema { + clone() + resp.Schema = s + } + + return resp +} + +func (w *Walker) walkResponses(resps *spec.Responses) *spec.Responses { + if resps == nil { + return nil + } + + orig := resps + cloned := false + clone := func() { + if !cloned { + cloned = true + resps = &spec.Responses{} + *resps = *orig + } + } + + if r := w.walkResponse(resps.ResponsesProps.Default); r != resps.ResponsesProps.Default { + clone() + resps.Default = r + } + + responsesCloned := false + for k, v := range resps.ResponsesProps.StatusCodeResponses { + if r := w.walkResponse(&v); r != &v { + if !responsesCloned { + responsesCloned = true + clone() + resps.ResponsesProps.StatusCodeResponses = make(map[int]spec.Response, len(orig.StatusCodeResponses)) + for k2, v2 := range orig.StatusCodeResponses { + resps.ResponsesProps.StatusCodeResponses[k2] = v2 + } + } + resps.ResponsesProps.StatusCodeResponses[k] = *r + } + } + + return resps +} + +func (w *Walker) walkOperation(op *spec.Operation) *spec.Operation { + if op == nil { + return nil + } + + orig := op + cloned := false + clone := func() { + if !cloned { + cloned = true + op = &spec.Operation{} + *op = *orig + } + } + + parametersCloned := false + for i := range op.Parameters { + if s := w.walkParameter(&op.Parameters[i]); s != &op.Parameters[i] { + if !parametersCloned { + parametersCloned = true + clone() + op.Parameters = make([]spec.Parameter, len(orig.Parameters)) + copy(op.Parameters, orig.Parameters) + } + op.Parameters[i] = *s + } + } + + if r := w.walkResponses(op.Responses); r != op.Responses { + clone() + op.Responses = r + } + + return op +} + +func (w *Walker) walkPathItem(pathItem *spec.PathItem) *spec.PathItem { + if pathItem == nil { + 
return nil + } + + orig := pathItem + cloned := false + clone := func() { + if !cloned { + cloned = true + pathItem = &spec.PathItem{} + *pathItem = *orig + } + } + + if p, changed := w.walkParameters(pathItem.Parameters); changed { + clone() + pathItem.Parameters = p + } + if op := w.walkOperation(pathItem.Get); op != pathItem.Get { + clone() + pathItem.Get = op + } + if op := w.walkOperation(pathItem.Head); op != pathItem.Head { + clone() + pathItem.Head = op + } + if op := w.walkOperation(pathItem.Delete); op != pathItem.Delete { + clone() + pathItem.Delete = op + } + if op := w.walkOperation(pathItem.Options); op != pathItem.Options { + clone() + pathItem.Options = op + } + if op := w.walkOperation(pathItem.Patch); op != pathItem.Patch { + clone() + pathItem.Patch = op + } + if op := w.walkOperation(pathItem.Post); op != pathItem.Post { + clone() + pathItem.Post = op + } + if op := w.walkOperation(pathItem.Put); op != pathItem.Put { + clone() + pathItem.Put = op + } + + return pathItem +} + +func (w *Walker) walkPaths(paths *spec.Paths) *spec.Paths { + if paths == nil { + return nil + } + + orig := paths + cloned := false + clone := func() { + if !cloned { + cloned = true + paths = &spec.Paths{} + *paths = *orig + } + } + + pathsCloned := false + for k, v := range paths.Paths { + if p := w.walkPathItem(&v); p != &v { + if !pathsCloned { + pathsCloned = true + clone() + paths.Paths = make(map[string]spec.PathItem, len(orig.Paths)) + for k2, v2 := range orig.Paths { + paths.Paths[k2] = v2 + } + } + paths.Paths[k] = *p + } + } + + return paths +} + +func (w *Walker) WalkRoot(swagger *spec.Swagger) *spec.Swagger { + if swagger == nil { + return nil + } + + orig := swagger + cloned := false + clone := func() { + if !cloned { + cloned = true + swagger = &spec.Swagger{} + *swagger = *orig + } + } + + parametersCloned := false + for k, v := range swagger.Parameters { + if p := w.walkParameter(&v); p != &v { + if !parametersCloned { + parametersCloned = true + clone() + swagger.Parameters = make(map[string]spec.Parameter, len(orig.Parameters)) + for k2, v2 := range orig.Parameters { + swagger.Parameters[k2] = v2 + } + } + swagger.Parameters[k] = *p + } + } + + responsesCloned := false + for k, v := range swagger.Responses { + if r := w.walkResponse(&v); r != &v { + if !responsesCloned { + responsesCloned = true + clone() + swagger.Responses = make(map[string]spec.Response, len(orig.Responses)) + for k2, v2 := range orig.Responses { + swagger.Responses[k2] = v2 + } + } + swagger.Responses[k] = *r + } + } + + definitionsCloned := false + for k, v := range swagger.Definitions { + if s := w.WalkSchema(&v); s != &v { + if !definitionsCloned { + definitionsCloned = true + clone() + swagger.Definitions = make(spec.Definitions, len(orig.Definitions)) + for k2, v2 := range orig.Definitions { + swagger.Definitions[k2] = v2 + } + } + swagger.Definitions[k] = *s + } + } + + if swagger.Paths != nil { + if p := w.walkPaths(swagger.Paths); p != swagger.Paths { + clone() + swagger.Paths = p + } + } + + return swagger +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/component.go b/vendor/k8s.io/kube-openapi/pkg/spec3/component.go new file mode 100644 index 000000000..c1bb8bc7b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/component.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import "k8s.io/kube-openapi/pkg/validation/spec" + +// Components holds a set of reusable objects for different aspects of the OAS. +// All objects defined within the components object will have no effect on the API +// unless they are explicitly referenced from properties outside the components object. +// +// more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject +type Components struct { + // Schemas holds reusable Schema Objects + Schemas map[string]*spec.Schema `json:"schemas,omitempty"` + // SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject + SecuritySchemes SecuritySchemes `json:"securitySchemes,omitempty"` + // Responses holds reusable Responses Objects + Responses map[string]*Response `json:"responses,omitempty"` + // Parameters holds reusable Parameters Objects + Parameters map[string]*Parameter `json:"parameters,omitempty"` + // Example holds reusable Example objects + Examples map[string]*Example `json:"examples,omitempty"` + // RequestBodies holds reusable Request Body objects + RequestBodies map[string]*RequestBody `json:"requestBodies,omitempty"` + // Links is a map of operations links that can be followed from the response + Links map[string]*Link `json:"links,omitempty"` + // Headers holds a maps of a headers name to its definition + Headers map[string]*Header `json:"headers,omitempty"` + // all fields are defined at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject +} + +// SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject +type SecuritySchemes map[string]*SecurityScheme diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go new file mode 100644 index 000000000..51dac4bdf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -0,0 +1,64 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type Encoding struct { + EncodingProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON +func (e *Encoding) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(e.EncodingProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (e *Encoding) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &e.EncodingProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} + +type EncodingProps struct { + // Content Type for encoding a specific property + ContentType string `json:"contentType,omitempty"` + // A map allowing additional information to be provided as headers + Headers map[string]*Header `json:"headers,omitempty"` + // Describes how a specific property value will be serialized depending on its type + Style string `json:"style,omitempty"` + // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. For other types of properties this property has no effect + Explode string `json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool `json:"allowReserved,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go new file mode 100644 index 000000000..0f5ab983c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -0,0 +1,73 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// Example https://swagger.io/specification/#example-object + +type Example struct { + spec.Refable + ExampleProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON +func (e *Example) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(e.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.ExampleProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (e *Example) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &e.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &e.ExampleProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} + +type ExampleProps struct { + // Summary holds a short description of the example + Summary string `json:"summary,omitempty"` + // Description holds a long description of the example + Description string `json:"description,omitempty"` + // Embedded literal example. + Value interface{} `json:"value,omitempty"` + // A URL that points to the literal example. This provides the capability to reference examples that cannot easily be included in JSON or YAML documents. + ExternalValue string `json:"externalValue,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go new file mode 100644 index 000000000..117113e7a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +type ExternalDocumentation struct { + ExternalDocumentationProps + spec.VendorExtensible +} + +type ExternalDocumentationProps struct { + // Description is a short description of the target documentation. CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty"` + // URL is the URL for the target documentation. 
+ URL string `json:"url"` +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(e.ExternalDocumentationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go new file mode 100644 index 000000000..cead4b15d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Header a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +// +// Note that this struct is actually a thin wrapper around HeaderProps to make it referable and extensible +type Header struct { + spec.Refable + HeaderProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Header as JSON +func (h *Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &h.HeaderProps); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + + return nil +} + +// HeaderProps a struct that describes a header object +type HeaderProps struct { + // Description holds a brief description of the parameter + Description string `json:"description,omitempty"` + // Required determines whether this parameter is mandatory + Required bool `json:"required,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // AllowEmptyValue sets the ability to pass empty-valued parameters + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` + // Style describes how the parameter value will be serialized depending on the type of the parameter value + Style string `json:"style,omitempty"` + // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map 
+ Explode bool `json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool `json:"allowReserved,omitempty"` + // Schema holds the schema defining the type used for the parameter + Schema *spec.Schema `json:"schema,omitempty"` + // Content holds a map containing the representations for the parameter + Content map[string]*MediaType `json:"content,omitempty"` + // Example of the header + Example interface{} `json:"example,omitempty"` + // Examples of the header + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go new file mode 100644 index 000000000..828fd8dc5 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// MediaType a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject +// +// Note that this struct is actually a thin wrapper around MediaTypeProps to make it referable and extensible +type MediaType struct { + MediaTypeProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON +func (m *MediaType) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(m.MediaTypeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(m.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (m *MediaType) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &m.VendorExtensible); err != nil { + return err + } + return nil +} + +// MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject +type MediaTypeProps struct { + // Schema holds the schema defining the type used for the media type + Schema *spec.Schema `json:"schema,omitempty"` + // Example of the media type + Example interface{} `json:"example,omitempty"` + // Examples of the media type. Each example object should match the media type and specific schema if present + Examples map[string]*Example `json:"examples,omitempty"` + // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go new file mode 100644 index 000000000..de8aa4602 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -0,0 +1,79 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject +// +// Note that this struct is actually a thin wrapper around OperationProps to make it referable and extensible +type Operation struct { + OperationProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Operation as JSON +func (o *Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject +type OperationProps struct { + // Tags holds a list of tags for API documentation control + Tags []string `json:"tags,omitempty"` + // Summary holds a short summary of what the operation does + Summary string `json:"summary,omitempty"` + // Description holds a verbose explanation of the operation behavior + Description string `json:"description,omitempty"` + // ExternalDocs holds additional external documentation for this operation + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + // OperationId holds a unique string used to identify the operation + OperationId string `json:"operationId,omitempty"` + // Parameters a list of parameters that are applicable for this operation + Parameters []*Parameter `json:"parameters,omitempty"` + // RequestBody holds the request body applicable for this operation + RequestBody *RequestBody `json:"requestBody,omitempty"` + // Responses holds the list of possible responses as they are returned from executing this operation + Responses *Responses `json:"responses,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation + SecurityRequirement 
[]*SecurityRequirement `json:"security,omitempty"` + // Servers contains an alternative server array to service this operation + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go new file mode 100644 index 000000000..0d7180e50 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Parameter a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +// +// Note that this struct is actually a thin wrapper around ParameterProps to make it referable and extensible +type Parameter struct { + spec.Refable + ParameterProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON +func (p *Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.ParameterProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.ParameterProps); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + + return nil +} + +// ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +type ParameterProps struct { + // Name holds the name of the parameter + Name string `json:"name,omitempty"` + // In holds the location of the parameter + In string `json:"in,omitempty"` + // Description holds a brief description of the parameter + Description string `json:"description,omitempty"` + // Required determines whether this parameter is mandatory + Required bool `json:"required,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // AllowEmptyValue sets the ability to pass empty-valued parameters + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` + // Style describes how the parameter value will be serialized depending on the type of the parameter value + Style string `json:"style,omitempty"` + // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map + Explode bool `json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool 
`json:"allowReserved,omitempty"` + // Schema holds the schema defining the type used for the parameter + Schema *spec.Schema `json:"schema,omitempty"` + // Content holds a map containing the representations for the parameter + Content map[string]*MediaType `json:"content,omitempty"` + // Example of the parameter's potential value + Example interface{} `json:"example,omitempty"` + // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go new file mode 100644 index 000000000..bc48c504d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject +type Paths struct { + Paths map[string]*Path + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Paths as JSON +func (p *Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.Paths) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + var pi *Path + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject +// +// Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible +type Path struct { + spec.Refable + PathProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Path as JSON +func (p *Path) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.PathProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + 
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+func (p *Path) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &p.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.PathProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
+
+// PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject
+type PathProps struct {
+ // Summary holds a summary for all operations in this path
+ Summary string `json:"summary,omitempty"`
+ // Description holds a description for all operations in this path
+ Description string `json:"description,omitempty"`
+ // Get defines GET operation
+ Get *Operation `json:"get,omitempty"`
+ // Put defines PUT operation
+ Put *Operation `json:"put,omitempty"`
+ // Post defines POST operation
+ Post *Operation `json:"post,omitempty"`
+ // Delete defines DELETE operation
+ Delete *Operation `json:"delete,omitempty"`
+ // Options defines OPTIONS operation
+ Options *Operation `json:"options,omitempty"`
+ // Head defines HEAD operation
+ Head *Operation `json:"head,omitempty"`
+ // Patch defines PATCH operation
+ Patch *Operation `json:"patch,omitempty"`
+ // Trace defines TRACE operation
+ Trace *Operation `json:"trace,omitempty"`
+ // Servers is an alternative server array to service all operations in this path
+ Servers []*Server `json:"servers,omitempty"`
+ // Parameters holds a list of parameters that are applicable for all the operations described under this path
+ Parameters []*Parameter `json:"parameters,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go
new file mode 100644
index 000000000..0adc62826
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package spec3 + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject +// +// Note that this struct is actually a thin wrapper around RequestBodyProps to make it referable and extensible +type RequestBody struct { + spec.Refable + RequestBodyProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON +func (r *RequestBody) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.RequestBodyProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r *RequestBody) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.RequestBodyProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + return nil +} + +// RequestBodyProps describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject +type RequestBodyProps struct { + // Description holds a brief description of the request body + Description string `json:"description,omitempty"` + // Content is the content of the request body. The key is a media type or media type range and the value describes it + Content map[string]*MediaType `json:"content,omitempty"` + // Required determines if the request body is required in the request + Required bool `json:"required,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go new file mode 100644 index 000000000..ccd73369f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -0,0 +1,203 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + "strconv" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// Responses holds the list of possible responses as they are returned from executing this operation +// +// Note that this struct is actually a thin wrapper around ResponsesProps to make it referable and extensible +type Responses struct { + ResponsesProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (r *Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +// ResponsesProps holds the list of possible responses as they are returned from executing this operation +type ResponsesProps struct { + // Default holds the documentation of responses other than the ones declared for specific HTTP response codes. Use this field to cover undeclared responses + Default *Response `json:"-"` + // StatusCodeResponses holds a map of any HTTP status code to the response definition + StatusCodeResponses map[int]*Response `json:"-"` +} + +// MarshalJSON is a custom marshal function that knows how to encode ResponsesProps as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]*Response{} + if r.Default != nil { + toser["default"] = r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]*Response + if err := json.Unmarshal(data, &res); err != nil { + return nil + } + if v, ok := res["default"]; ok { + r.Default = v + delete(res, "default") + } + for k, v := range res { + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = v + } + } + return nil +} + +// Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject +// +// Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible +type Response struct { + spec.Refable + ResponseProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Response as JSON +func (r *Response) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +// ResponseProps describes a single response from an API Operation, more at 
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject
+type ResponseProps struct {
+ // Description holds a short description of the response
+ Description string `json:"description,omitempty"`
+ // Headers holds a map of a header name to its definition
+ Headers map[string]*Header `json:"headers,omitempty"`
+ // Content holds a map containing descriptions of potential response payloads
+ Content map[string]*MediaType `json:"content,omitempty"`
+ // Links is a map of operation links that can be followed from the response
+ Links map[string]*Link `json:"links,omitempty"`
+}
+
+
+// Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object
+type Link struct {
+ spec.Refable
+ LinkProps
+ spec.VendorExtensible
+}
+
+// MarshalJSON is a custom marshal function that knows how to encode Link as JSON
+func (r *Link) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.LinkProps)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+func (r *Link) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.LinkProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// LinkProps describes a single design-time link for a response, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#linkObject
+type LinkProps struct {
+ // OperationId is the name of an existing, resolvable OAS operation
+ OperationId string `json:"operationId,omitempty"`
+ // Parameters is a map representing parameters to pass to an operation as specified with operationId or identified via operationRef
+ Parameters map[string]interface{} `json:"parameters,omitempty"`
+ // Description holds a description of the link
+ Description string `json:"description,omitempty"`
+ // RequestBody is a literal value or expression to use as a request body when calling the target operation
+ RequestBody interface{} `json:"requestBody,omitempty"`
+ // Server holds a server object used by the target operation
+ Server *Server `json:"server,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go
new file mode 100644
index 000000000..0ce8924ef
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spec3
+
+import (
+ "encoding/json"
+
+ "k8s.io/kube-openapi/pkg/validation/spec"
+ "github.com/go-openapi/swag"
+)
+
+// SecurityRequirement describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object
+//
+// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible
+type SecurityRequirement struct {
+ SecurityRequirementProps
+ spec.VendorExtensible
+}
+
+// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON
+func (s *SecurityRequirement) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SecurityRequirementProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (s *SecurityRequirement) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &s.VendorExtensible)
+}
+
+// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object
+type SecurityRequirementProps map[string][]string
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go
new file mode 100644
index 000000000..9b1352f4e
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package spec3 + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" +) + +// SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject +type SecurityScheme struct { + spec.Refable + SecuritySchemeProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON +func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + b3, err := json.Marshal(s.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &s.Refable) +} + +// SecuritySchemeProps defines a security scheme that can be used by the operations +type SecuritySchemeProps struct { + // Type of the security scheme + Type string `json:"type,omitempty"` + // Description holds a short description for security scheme + Description string `json:"description,omitempty"` + // Name holds the name of the header, query or cookie parameter to be used + Name string `json:"name,omitempty"` + // In holds the location of the API key + In string `json:"in,omitempty"` + // Scheme holds the name of the HTTP Authorization scheme to be used in the Authorization header + Scheme string `json:"scheme,omitempty"` + // BearerFormat holds a hint to the client to identify how the bearer token is formatted + BearerFormat string `json:"bearerFormat,omitempty"` + // Flows contains configuration information for the flow types supported. + Flows map[string]*OAuthFlow `json:"flows,omitempty"` + // OpenIdConnectUrl holds an url to discover OAuth2 configuration values from + OpenIdConnectUrl string `json:"openIdConnectUrl,omitempty"` +} + +// OAuthFlow contains configuration information for the flow types supported. 
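+// Editor's illustrative note (not upstream documentation): per the OpenAPI 3.0
+// specification's OAuth Flows Object, the keys of SecuritySchemeProps.Flows are
+// expected to be the flow types "implicit", "password", "clientCredentials" and
+// "authorizationCode", each mapping to an OAuthFlow value.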
+type OAuthFlow struct {
+ OAuthFlowProps
+ spec.VendorExtensible
+}
+
+// MarshalJSON is a custom marshal function that knows how to encode OAuthFlow as JSON
+func (o *OAuthFlow) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(o.OAuthFlowProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(o.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (o *OAuthFlow) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &o.OAuthFlowProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// OAuthFlowProps holds configuration details for a supported OAuth Flow
+type OAuthFlowProps struct {
+ // AuthorizationUrl holds the authorization URL to be used for this flow
+ AuthorizationUrl string `json:"authorizationUrl,omitempty"`
+ // TokenUrl holds the token URL to be used for this flow
+ TokenUrl string `json:"tokenUrl,omitempty"`
+ // RefreshUrl holds the URL to be used for obtaining refresh tokens
+ RefreshUrl string `json:"refreshUrl,omitempty"`
+ // Scopes holds the available scopes for the OAuth2 security scheme
+ Scopes map[string]string `json:"scopes,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go
new file mode 100644
index 000000000..a505fb221
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spec3
+
+import (
+ "encoding/json"
+ "k8s.io/kube-openapi/pkg/validation/spec"
+ "github.com/go-openapi/swag"
+)
+
+type Server struct {
+ ServerProps
+ spec.VendorExtensible
+}
+
+type ServerProps struct {
+ // Description is an optional string describing the host designated by the URL. CommonMark syntax MAY be used for rich text representation.
+ Description string `json:"description,omitempty"`
+ // URL is the URL to the target host.
+ URL string `json:"url"`
+ // Variables contains a map between a variable name and its value. The value is used for substitution in the server's URL template
+ Variables map[string]*ServerVariable `json:"variables,omitempty"`
+}
+
+// MarshalJSON is a custom marshal function that knows how to encode Server as JSON
+func (s *Server) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.ServerProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+func (s *Server) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.ServerProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &s.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
+
+type ServerVariable struct {
+ ServerVariableProps
+ spec.VendorExtensible
+}
+
+type ServerVariableProps struct {
+ // Enum is an enumeration of string values to be used if the substitution options are from a limited set
+ Enum []string `json:"enum,omitempty"`
+ // Default is the default value to use for substitution, which SHALL be sent if an alternate value is not supplied
+ Default string `json:"default"`
+ // Description is a description for the server variable
+ Description string `json:"description,omitempty"`
+}
+
+// MarshalJSON is a custom marshal function that knows how to encode ServerVariable as JSON
+func (s *ServerVariable) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.ServerVariableProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+func (s *ServerVariable) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &s.VendorExtensible); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go
new file mode 100644
index 000000000..3ff48a3c3
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spec3
+
+import (
+ "k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+// OpenAPI is an object that describes an API and conforms to the OpenAPI Specification.
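+//
+// A minimal construction sketch (editor's illustration, not upstream documentation;
+// the field values are made up and spec.Info comes from pkg/validation/spec):
+//
+//	doc := &OpenAPI{
+//		Version: "3.0.0",
+//		Info:    &spec.Info{InfoProps: spec.InfoProps{Title: "Example API", Version: "v1"}},
+//		Servers: []*Server{{ServerProps: ServerProps{URL: "https://api.example.com"}}},
+//	}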
+type OpenAPI struct { + // Version represents the semantic version number of the OpenAPI Specification that this document uses + Version string `json:"openapi"` + // Info provides metadata about the API + Info *spec.Info `json:"info"` + // Paths holds the available target and operations for the API + Paths *Paths `json:"paths,omitempty"` + // Servers is an array of Server objects which provide connectivity information to a target server + Servers []*Server `json:"servers,omitempty"` + // Components hold various schemas for the specification + Components *Components `json:"components,omitempty"` + // ExternalDocs holds additional external documentation + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 504a3972a..51a2f5781 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -68,6 +68,25 @@ func (e Extensions) GetStringSlice(key string) ([]string, bool) { return nil, false } +// GetObject gets the object value from the extensions. +// out must be a json serializable type; the json go struct +// tags of out are used to populate it. +func (e Extensions) GetObject(key string, out interface{}) error { + // This json serialization/deserialization could be replaced with + // an approach using reflection if the optimization becomes justified. + if v, ok := e[strings.ToLower(key)]; ok { + b, err := json.Marshal(v) + if err != nil { + return err + } + err = json.Unmarshal(b, out) + if err != nil { + return err + } + } + return nil +} + // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions diff --git a/vendor/k8s.io/utils/clock/README.md b/vendor/k8s.io/utils/clock/README.md new file mode 100644 index 000000000..ad2a8868a --- /dev/null +++ b/vendor/k8s.io/utils/clock/README.md @@ -0,0 +1,4 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go new file mode 100644 index 000000000..dd181ce8d --- /dev/null +++ b/vendor/k8s.io/utils/clock/clock.go @@ -0,0 +1,168 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. 
+ NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. 
+func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go new file mode 100644 index 000000000..fb493c4ba --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -0,0 +1,360 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&FakePassiveClock{}) + _ = clock.WithTicker(&FakeClock{}) + _ = clock.Clock(&IntervalClock{}) +) + +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { + lock sync.RWMutex + time time.Time +} + +// FakeClock implements clock.Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock + + // waiters are waiting for the fake time to pass their specified time + waiters []*fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + fired bool + afterFunc func() +} + +// NewFakePassiveClock returns a new FakePassiveClock. +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } +} + +// NewFakeClock constructs a fake clock set to the provided time. +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + FakePassiveClock: *NewFakePassiveClock(t), + } +} + +// Now returns f's time. +func (f *FakePassiveClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// SetTime sets the time on the FakePassiveClock. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + +// After is the fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// NewTimer constructs a fake timer, akin to time.NewTimer(d). +func (f *FakeClock) NewTimer(d time.Duration) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// AfterFunc is the Fake version of time.AfterFunc(d, cb). 
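+//
+// Illustrative sketch (editor's addition, not upstream documentation): the callback
+// runs when the fake time is advanced to or past the target time, e.g.
+//
+//	fc := NewFakeClock(time.Now())
+//	fc.AfterFunc(time.Second, func() { fmt.Println("fired") })
+//	fc.Step(time.Second) // the callback runs synchronously inside Step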
+func (f *FakeClock) AfterFunc(d time.Duration, cb func()) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + afterFunc: cb, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// Tick constructs a fake ticker, akin to time.Tick +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + if d <= 0 { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// NewTicker returns a new Ticker. +func (f *FakeClock) NewTicker(d time.Duration) clock.Ticker { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return &fakeTicker{ + c: ch, + } +} + +// Step moves the clock by Duration and notifies anyone that's called After, +// Tick, or NewTimer. +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// SetTime sets the time. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]*fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := f.waiters[i] + if !w.targetTime.After(t) { + if w.skipIfBlocked { + select { + case w.destChan <- t: + w.fired = true + default: + } + } else { + w.destChan <- t + w.fired = true + } + + if w.afterFunc != nil { + w.afterFunc() + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied (so you can +// write race-free tests). +func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +// Sleep is akin to time.Sleep +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. +// IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. +// See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// After is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. 
+func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// NewTimer is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) clock.Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// AfterFunc is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) AfterFunc(d time.Duration, f func()) clock.Timer { + panic("IntervalClock doesn't implement AfterFunc") +} + +// Tick is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +// NewTicker has no implementation yet and is omitted. +// TODO: make interval clock use FakeClock so this can be implemented. +//func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { +// panic("IntervalClock doesn't implement NewTicker") +//} + +// Sleep is unimplemented, will panic. +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +var _ = clock.Timer(&fakeTimer{}) + +// fakeTimer implements clock.Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, w) + } + } + + f.fakeClock.waiters = newWaiters + + return !f.waiter.fired +} + +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + var isWaiting bool + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w == &f.waiter { + isWaiting = true + break + } + } + if !isWaiting { + f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter) + } + + return active +} + +type fakeTicker struct { + c <-chan time.Time +} + +func (t *fakeTicker) C() <-chan time.Time { + return t.c +} + +func (t *fakeTicker) Stop() { +} diff --git a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go new file mode 100644 index 000000000..951ca4d17 --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&SimpleIntervalClock{}) +) + +// SimpleIntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration +type SimpleIntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *SimpleIntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *SimpleIntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} diff --git a/vendor/k8s.io/utils/lru/lru.go b/vendor/k8s.io/utils/lru/lru.go index d4e33a62c..5d0077abf 100644 --- a/vendor/k8s.io/utils/lru/lru.go +++ b/vendor/k8s.io/utils/lru/lru.go @@ -45,8 +45,8 @@ func (c *Cache) Add(key Key, value interface{}) { // Get looks up a key's value from the cache. func (c *Cache) Get(key Key) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() + c.lock.Lock() + defer c.lock.Unlock() return c.cache.Get(key) } diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 1da6f6664..2cab2c580 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -46,6 +46,24 @@ func AllPtrFieldsNil(obj interface{}) bool { return true } +// Int returns a pointer to an int +func Int(i int) *int { + return &i +} + +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +func IntDeref(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +var IntPtrDerefOr = IntDeref // for back-compat + // Int32 returns a pointer to an int32. 
func Int32(i int32) *int32 { return &i diff --git a/vendor/modules.txt b/vendor/modules.txt index 0391e61bb..26f8c6bdd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go v0.54.0 +# cloud.google.com/go v0.81.0 ## explicit; go 1.11 cloud.google.com/go/compute/metadata # github.com/Azure/go-autorest v14.2.0+incompatible @@ -59,7 +59,7 @@ github.com/davecgh/go-spew/spew ## explicit github.com/emicklei/go-restful github.com/emicklei/go-restful/log -# github.com/evanphx/json-patch v4.11.0+incompatible +# github.com/evanphx/json-patch v4.12.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/felixge/httpsnoop v1.0.1 @@ -68,8 +68,11 @@ github.com/felixge/httpsnoop # github.com/form3tech-oss/jwt-go v3.2.3+incompatible ## explicit github.com/form3tech-oss/jwt-go -# github.com/go-logr/logr v0.4.0 -## explicit; go 1.14 +# github.com/fsnotify/fsnotify v1.4.9 +## explicit; go 1.13 +github.com/fsnotify/fsnotify +# github.com/go-logr/logr v1.2.0 +## explicit; go 1.16 github.com/go-logr/logr # github.com/go-openapi/jsonpointer v0.19.5 ## explicit; go 1.13 @@ -124,6 +127,7 @@ github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/jsonschema github.com/googleapis/gnostic/openapiv2 +github.com/googleapis/gnostic/openapiv3 # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus @@ -141,7 +145,7 @@ github.com/inconshreveable/mousetrap # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern -# github.com/json-iterator/go v1.1.11 +# github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go # github.com/mailru/easyjson v0.7.6 @@ -160,8 +164,8 @@ github.com/mmarkdown/mmark/mparser # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent -# github.com/modern-go/reflect2 v1.0.1 -## explicit +# github.com/modern-go/reflect2 v1.0.2 +## explicit; go 1.12 github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit @@ -179,8 +183,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.2.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.26.0 -## explicit; go 1.11 +# github.com/prometheus/common v0.28.0 +## explicit; go 1.13 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model @@ -189,8 +193,8 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/spf13/cobra v1.1.3 -## explicit; go 1.12 +# github.com/spf13/cobra v1.2.1 +## explicit; go 1.14 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 @@ -292,7 +296,7 @@ go.uber.org/atomic # go.uber.org/multierr v1.6.0 ## explicit; go 1.12 go.uber.org/multierr -# go.uber.org/zap v1.17.0 +# go.uber.org/zap v1.19.0 ## explicit; go 1.13 go.uber.org/zap go.uber.org/zap/buffer @@ -301,8 +305,8 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 -## explicit; go 1.11 +# golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 +## explicit; go 1.17 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 
golang.org/x/crypto/internal/subtle @@ -315,7 +319,7 @@ golang.org/x/crypto/salsa20/salsa ## explicit; go 1.12 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 +# golang.org/x/net v0.0.0-20210825183410-e898025ed96a ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -326,17 +330,19 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d +# golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f ## explicit; go 1.11 golang.org/x/oauth2 +golang.org/x/oauth2/authhandler golang.org/x/oauth2/google +golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c ## explicit golang.org/x/sync/singleflight -# golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 +# golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -344,11 +350,11 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d -## explicit; go 1.11 +# golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b +## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.3.6 -## explicit; go 1.11 +# golang.org/x/text v0.3.7 +## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi @@ -357,9 +363,13 @@ golang.org/x/text/width # golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.1.2 +# golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff ## explicit; go 1.17 golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages golang.org/x/tools/imports golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -369,11 +379,14 @@ golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 ## explicit; go 1.11 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/appengine v1.6.5 +# google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -385,13 +398,13 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c +# google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 ## explicit; go 1.11 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.38.0 +# google.golang.org/grpc v1.40.0 ## explicit; go 1.11 google.golang.org/grpc google.golang.org/grpc/attributes @@ -439,7 +452,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status 
google.golang.org/grpc/tap -# google.golang.org/protobuf v1.26.0 +# google.golang.org/protobuf v1.27.1 ## explicit; go 1.9 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -487,7 +500,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.22.0 +# k8s.io/api v0.23.0 ## explicit; go 1.16 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -502,6 +515,7 @@ k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 k8s.io/api/autoscaling/v1 +k8s.io/api/autoscaling/v2 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 @@ -518,6 +532,7 @@ k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/flowcontrol/v1beta1 +k8s.io/api/flowcontrol/v1beta2 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1 @@ -534,7 +549,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.22.0 +# k8s.io/apimachinery v0.23.0 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -565,7 +580,6 @@ k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer @@ -589,7 +603,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.22.0 +# k8s.io/apiserver v0.23.0 ## explicit; go 1.16 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -657,6 +671,7 @@ k8s.io/apiserver/pkg/endpoints/handlers/responsewriters k8s.io/apiserver/pkg/endpoints/metrics k8s.io/apiserver/pkg/endpoints/openapi k8s.io/apiserver/pkg/endpoints/request +k8s.io/apiserver/pkg/endpoints/responsewriter k8s.io/apiserver/pkg/endpoints/warning k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/quota/v1 @@ -678,6 +693,7 @@ k8s.io/apiserver/pkg/server/routes k8s.io/apiserver/pkg/server/storage k8s.io/apiserver/pkg/storage k8s.io/apiserver/pkg/storage/cacher +k8s.io/apiserver/pkg/storage/cacher/metrics k8s.io/apiserver/pkg/storage/errors k8s.io/apiserver/pkg/storage/etcd3 k8s.io/apiserver/pkg/storage/etcd3/metrics @@ -696,11 +712,10 @@ k8s.io/apiserver/pkg/util/apihelpers k8s.io/apiserver/pkg/util/dryrun k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/util/flowcontrol -k8s.io/apiserver/pkg/util/flowcontrol/counter k8s.io/apiserver/pkg/util/flowcontrol/debug k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing +k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise -k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset k8s.io/apiserver/pkg/util/flowcontrol/format k8s.io/apiserver/pkg/util/flowcontrol/metrics @@ -718,7 +733,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.22.0 +# k8s.io/client-go v0.23.0 ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -727,6 +742,7 @@ k8s.io/client-go/applyconfigurations/apps/v1 k8s.io/client-go/applyconfigurations/apps/v1beta1 k8s.io/client-go/applyconfigurations/apps/v1beta2 k8s.io/client-go/applyconfigurations/autoscaling/v1 +k8s.io/client-go/applyconfigurations/autoscaling/v2 k8s.io/client-go/applyconfigurations/autoscaling/v2beta1 k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 @@ -743,6 +759,7 @@ k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 @@ -775,6 +792,7 @@ k8s.io/client-go/informers/apps/v1beta1 k8s.io/client-go/informers/apps/v1beta2 k8s.io/client-go/informers/autoscaling k8s.io/client-go/informers/autoscaling/v1 +k8s.io/client-go/informers/autoscaling/v2 k8s.io/client-go/informers/autoscaling/v2beta1 k8s.io/client-go/informers/autoscaling/v2beta2 k8s.io/client-go/informers/batch @@ -799,6 +817,7 @@ k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/flowcontrol k8s.io/client-go/informers/flowcontrol/v1alpha1 k8s.io/client-go/informers/flowcontrol/v1beta1 +k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1 @@ -847,6 +866,8 @@ k8s.io/client-go/kubernetes/typed/authorization/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v1 k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake +k8s.io/client-go/kubernetes/typed/autoscaling/v2 +k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 @@ -879,6 +900,8 @@ k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 @@ -918,6 +941,7 @@ k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 k8s.io/client-go/listers/apps/v1beta2 k8s.io/client-go/listers/autoscaling/v1 +k8s.io/client-go/listers/autoscaling/v2 k8s.io/client-go/listers/autoscaling/v2beta1 k8s.io/client-go/listers/autoscaling/v2beta2 k8s.io/client-go/listers/batch/v1 @@ -934,6 +958,7 @@ k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/flowcontrol/v1beta1 +k8s.io/client-go/listers/flowcontrol/v1beta2 k8s.io/client-go/listers/networking/v1 k8s.io/client-go/listers/networking/v1beta1 k8s.io/client-go/listers/node/v1 @@ -987,7 +1012,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/code-generator 
v0.22.0 +# k8s.io/code-generator v0.23.0 ## explicit; go 1.16 k8s.io/code-generator k8s.io/code-generator/cmd/client-gen @@ -1022,7 +1047,7 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.22.0 +# k8s.io/component-base v0.23.0 ## explicit; go 1.16 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1030,6 +1055,7 @@ k8s.io/component-base/config/v1alpha1 k8s.io/component-base/featuregate k8s.io/component-base/logs k8s.io/component-base/logs/datapol +k8s.io/component-base/logs/registry k8s.io/component-base/logs/sanitization k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry @@ -1037,11 +1063,11 @@ k8s.io/component-base/metrics/prometheus/workqueue k8s.io/component-base/metrics/testutil k8s.io/component-base/traces k8s.io/component-base/version -# k8s.io/component-helpers v0.22.0 +# k8s.io/component-helpers v0.23.0 ## explicit; go 1.16 k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity -# k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 +# k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c ## explicit; go 1.13 k8s.io/gengo/args k8s.io/gengo/examples/deepcopy-gen/generators @@ -1053,18 +1079,22 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/klog/v2 v2.9.0 +# k8s.io/klog/v2 v2.30.0 ## explicit; go 1.13 k8s.io/klog/v2 -# k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e -## explicit; go 1.12 +# k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 +## explicit; go 1.16 k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder +k8s.io/kube-openapi/pkg/builder3 k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/generators k8s.io/kube-openapi/pkg/generators/rules k8s.io/kube-openapi/pkg/handler +k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/schemaconv +k8s.io/kube-openapi/pkg/schemamutation +k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets @@ -1072,9 +1102,11 @@ k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubectl v0.20.5 ## explicit; go 1.15 k8s.io/kubectl/pkg/util/qos -# k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 +# k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b ## explicit; go 1.12 k8s.io/utils/buffer +k8s.io/utils/clock +k8s.io/utils/clock/testing k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/golang-lru k8s.io/utils/internal/third_party/forked/golang/net @@ -1083,15 +1115,19 @@ k8s.io/utils/net k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 -## explicit; go 1.13 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 +## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client +# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 +## explicit; go 1.16 +sigs.k8s.io/json +sigs.k8s.io/json/internal/golang/encoding/json # sigs.k8s.io/mdtoc v1.0.1 ## explicit; go 1.15 sigs.k8s.io/mdtoc sigs.k8s.io/mdtoc/pkg/mdtoc -# sigs.k8s.io/structured-merge-diff/v4 v4.1.2 +# sigs.k8s.io/structured-merge-diff/v4 v4.2.0 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go 
b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index 761ac71fa..31bf9e11f 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -210,7 +210,7 @@ func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string) klog.V(5).Infoln("DIAL_REQ sent to proxy server") - c := &conn{stream: t.stream} + c := &conn{stream: t.stream, random: random} select { case res := <-resCh: diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go index 4a93c69cf..cc6e66be2 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go @@ -35,6 +35,7 @@ const CloseTimeout = 10 * time.Second type conn struct { stream client.ProxyService_ProxyClient connID int64 + random int64 readCh chan []byte closeCh chan string rdata []byte @@ -113,13 +114,26 @@ func (c *conn) SetWriteDeadline(t time.Time) error { // proxy service to notify remote to drop the connection. func (c *conn) Close() error { klog.V(4).Infoln("closing connection") - req := &client.Packet{ - Type: client.PacketType_CLOSE_REQ, - Payload: &client.Packet_CloseRequest{ - CloseRequest: &client.CloseRequest{ - ConnectID: c.connID, + var req *client.Packet + if c.connID != 0 { + req = &client.Packet{ + Type: client.PacketType_CLOSE_REQ, + Payload: &client.Packet_CloseRequest{ + CloseRequest: &client.CloseRequest{ + ConnectID: c.connID, + }, }, - }, + } + } else { + // Never received a DIAL response so no connection ID. 
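+			// Fall back to DIAL_CLS, identified by the random id that was sent
+			// with the original DIAL_REQ, so the proxy server can abandon the
+			// still-pending dial rather than an established connection.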
+ req = &client.Packet{ + Type: client.PacketType_DIAL_CLS, + Payload: &client.Packet_CloseDial{ + CloseDial: &client.CloseDial{ + Random: c.random, + }, + }, + } } klog.V(5).InfoS("[tracing] send req", "type", req.Type) diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index 107f2e546..59a797df4 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -47,6 +47,7 @@ const ( PacketType_CLOSE_REQ PacketType = 2 PacketType_CLOSE_RSP PacketType = 3 PacketType_DATA PacketType = 4 + PacketType_DIAL_CLS PacketType = 5 ) var PacketType_name = map[int32]string{ @@ -55,6 +56,7 @@ var PacketType_name = map[int32]string{ 2: "CLOSE_REQ", 3: "CLOSE_RSP", 4: "DATA", + 5: "DIAL_CLS", } var PacketType_value = map[string]int32{ @@ -63,6 +65,7 @@ var PacketType_value = map[string]int32{ "CLOSE_REQ": 2, "CLOSE_RSP": 3, "DATA": 4, + "DIAL_CLS": 5, } func (x PacketType) String() string { @@ -103,6 +106,7 @@ type Packet struct { // *Packet_Data // *Packet_CloseRequest // *Packet_CloseResponse + // *Packet_CloseDial Payload isPacket_Payload `protobuf_oneof:"payload"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -165,6 +169,10 @@ type Packet_CloseResponse struct { CloseResponse *CloseResponse `protobuf:"bytes,6,opt,name=closeResponse,proto3,oneof"` } +type Packet_CloseDial struct { + CloseDial *CloseDial `protobuf:"bytes,7,opt,name=closeDial,proto3,oneof"` +} + func (*Packet_DialRequest) isPacket_Payload() {} func (*Packet_DialResponse) isPacket_Payload() {} @@ -175,6 +183,8 @@ func (*Packet_CloseRequest) isPacket_Payload() {} func (*Packet_CloseResponse) isPacket_Payload() {} +func (*Packet_CloseDial) isPacket_Payload() {} + func (m *Packet) GetPayload() isPacket_Payload { if m != nil { return m.Payload @@ -217,6 +227,13 @@ func (m *Packet) GetCloseResponse() *CloseResponse { return nil } +func (m *Packet) GetCloseDial() *CloseDial { + if x, ok := m.GetPayload().(*Packet_CloseDial); ok { + return x.CloseDial + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Packet) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -225,6 +242,7 @@ func (*Packet) XXX_OneofWrappers() []interface{} { (*Packet_Data)(nil), (*Packet_CloseRequest)(nil), (*Packet_CloseResponse)(nil), + (*Packet_CloseDial)(nil), } } @@ -433,6 +451,46 @@ func (m *CloseResponse) GetConnectID() int64 { return 0 } +type CloseDial struct { + // random id of the DialRequest + Random int64 `protobuf:"varint,1,opt,name=random,proto3" json:"random,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseDial) Reset() { *m = CloseDial{} } +func (m *CloseDial) String() string { return proto.CompactTextString(m) } +func (*CloseDial) ProtoMessage() {} +func (*CloseDial) Descriptor() ([]byte, []int) { + return fileDescriptor_fec4258d9ecd175d, []int{5} +} + +func (m *CloseDial) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseDial.Unmarshal(m, b) +} +func (m *CloseDial) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseDial.Marshal(b, m, deterministic) +} +func (m *CloseDial) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseDial.Merge(m, src) +} +func (m *CloseDial) XXX_Size() int { + return xxx_messageInfo_CloseDial.Size(m) +} +func (m *CloseDial) XXX_DiscardUnknown() { + xxx_messageInfo_CloseDial.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseDial proto.InternalMessageInfo + +func (m *CloseDial) GetRandom() int64 { + if m != nil { + return m.Random + } + return 0 +} + type Data struct { // connectID to connect to ConnectID int64 `protobuf:"varint,1,opt,name=connectID,proto3" json:"connectID,omitempty"` @@ -449,7 +507,7 @@ func (m *Data) Reset() { *m = Data{} } func (m *Data) String() string { return proto.CompactTextString(m) } func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_fec4258d9ecd175d, []int{5} + return fileDescriptor_fec4258d9ecd175d, []int{6} } func (m *Data) XXX_Unmarshal(b []byte) error { @@ -499,6 +557,7 @@ func init() { proto.RegisterType((*DialResponse)(nil), "DialResponse") proto.RegisterType((*CloseRequest)(nil), "CloseRequest") proto.RegisterType((*CloseResponse)(nil), "CloseResponse") + proto.RegisterType((*CloseDial)(nil), "CloseDial") proto.RegisterType((*Data)(nil), "Data") } @@ -507,37 +566,39 @@ func init() { } var fileDescriptor_fec4258d9ecd175d = []byte{ - // 472 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xd1, 0x6e, 0x9b, 0x30, - 0x14, 0x85, 0x00, 0x49, 0xb8, 0x21, 0x15, 0xb2, 0xa6, 0x09, 0x75, 0x93, 0x5a, 0xf1, 0x14, 0x55, - 0x0b, 0x54, 0xa9, 0x34, 0xed, 0x35, 0x0d, 0xa9, 0x52, 0xa9, 0x5a, 0x99, 0xd3, 0xa7, 0xee, 0x61, - 0xf2, 0xc0, 0x9a, 0x50, 0x18, 0x66, 0xb6, 0x97, 0x8d, 0x0f, 0xda, 0x7f, 0x4e, 0x18, 0x52, 0xc8, - 0xa4, 0x6d, 0x52, 0x9f, 0xe0, 0x1c, 0xdf, 0x73, 0x7c, 0x7d, 0xae, 0x0d, 0xf3, 0x1d, 0x2b, 0x0a, - 0x9a, 0xc8, 0x6c, 0x9f, 0xc9, 0x6a, 0x9e, 0xe4, 0x19, 0x2d, 0x64, 0x58, 0x72, 0x26, 0x59, 0xd8, - 0x82, 0xe6, 0x13, 0x28, 0xce, 0xff, 0x35, 0x80, 0x61, 0x4c, 0x92, 0x1d, 0x95, 0xe8, 0x0c, 0x4c, - 0x59, 0x95, 0xd4, 0xd3, 0xcf, 0xf5, 0xd9, 0xc9, 0x62, 0x12, 0x34, 0xf4, 0x43, 0x55, 0x52, 0xac, - 0x16, 0xd0, 0x25, 0x4c, 0xd2, 0x8c, 0xe4, 0x98, 0x7e, 0xfb, 0x4e, 0x85, 0xf4, 0x06, 0xe7, 0xfa, - 0x6c, 0xb2, 0x70, 0x82, 0xa8, 0xe3, 0x36, 0x1a, 0xee, 0x97, 0xa0, 0x2b, 0x70, 0x1a, 0x28, 0x4a, - 0x56, 0x08, 0xea, 0x19, 0x4a, 0x32, 0x6d, 0x25, 0x0d, 0xb9, 0xd1, 0xf0, 0x51, 0x11, 0x7a, 0x05, - 
0x66, 0x4a, 0x24, 0xf1, 0x4c, 0x55, 0x6c, 0x05, 0x11, 0x91, 0x64, 0xa3, 0x61, 0x45, 0xd6, 0x8e, - 0x49, 0xce, 0x04, 0x3d, 0x34, 0x61, 0xb5, 0x8e, 0xab, 0x1e, 0x59, 0x3b, 0xf6, 0x8b, 0xd0, 0x5b, - 0x98, 0xb6, 0xb8, 0xed, 0x63, 0xa8, 0x54, 0x27, 0x07, 0xd5, 0x53, 0x23, 0xc7, 0x65, 0xd7, 0x36, - 0x8c, 0x4a, 0x52, 0xe5, 0x8c, 0xa4, 0xfe, 0x47, 0x98, 0xf4, 0xce, 0x89, 0x4e, 0x61, 0xac, 0xf2, - 0x4b, 0x58, 0xae, 0xf2, 0xb2, 0xf1, 0x13, 0x46, 0x1e, 0x8c, 0x48, 0x9a, 0x72, 0x2a, 0x84, 0x8a, - 0xc8, 0xc6, 0x07, 0x88, 0x5e, 0xc2, 0x90, 0x93, 0x22, 0x65, 0x5f, 0x55, 0x10, 0x06, 0x6e, 0x91, - 0xff, 0x08, 0x4e, 0x3f, 0x11, 0xf4, 0x02, 0x2c, 0xca, 0x39, 0xe3, 0xad, 0x75, 0x03, 0xd0, 0x6b, - 0xb0, 0x93, 0x66, 0xb6, 0xb7, 0x91, 0x72, 0x36, 0x70, 0x47, 0xfc, 0xd5, 0xfb, 0x0d, 0x38, 0xfd, - 0x6c, 0x8e, 0x5d, 0xf4, 0x3f, 0x5c, 0xfc, 0x15, 0x4c, 0x8f, 0x32, 0x79, 0x4e, 0x2b, 0xfe, 0x7b, - 0x30, 0xeb, 0x99, 0xfd, 0x7b, 0xab, 0xce, 0x79, 0xd0, 0x77, 0x46, 0xed, 0xf0, 0xeb, 0x43, 0x38, - 0xcd, 0xcc, 0x2f, 0x62, 0x80, 0xee, 0x2e, 0x22, 0x07, 0xc6, 0xd1, 0xed, 0xf2, 0xee, 0x13, 0x5e, - 0x7f, 0x70, 0xb5, 0x0e, 0x6d, 0x63, 0x57, 0x47, 0x53, 0xb0, 0x57, 0x77, 0xf7, 0xdb, 0xb5, 0x5a, - 0x1c, 0xf4, 0xe0, 0x36, 0x76, 0x0d, 0x34, 0x06, 0x33, 0x5a, 0x3e, 0x2c, 0x5d, 0xf3, 0xc2, 0x05, - 0x6b, 0xad, 0xb6, 0x1b, 0x81, 0xb1, 0xbe, 0xbf, 0x71, 0xb5, 0x45, 0x08, 0x4e, 0xcc, 0xd9, 0xcf, - 0x6a, 0x4b, 0xf9, 0x3e, 0x4b, 0x28, 0x3a, 0x03, 0x4b, 0x61, 0x34, 0x6a, 0xdf, 0xc1, 0xe9, 0xe1, - 0xc7, 0xd7, 0x66, 0xfa, 0xa5, 0x7e, 0x7d, 0xf3, 0x18, 0x89, 0xec, 0x8b, 0x08, 0x76, 0xef, 0x44, - 0x90, 0xb1, 0x90, 0x94, 0x99, 0xa0, 0x7c, 0x4f, 0xf9, 0xbc, 0xa0, 0xf2, 0x07, 0xe3, 0xbb, 0x79, - 0x59, 0xcb, 0xc3, 0xff, 0xbd, 0xc6, 0xcf, 0x43, 0x85, 0xae, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, - 0x64, 0xe0, 0x62, 0xbe, 0xb8, 0x03, 0x00, 0x00, + // 505 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x51, 0x8b, 0xd3, 0x40, + 0x18, 0x4c, 0xda, 0xa4, 0x6d, 0xbe, 0xa6, 0x47, 0x58, 0x44, 0xc2, 0x29, 0xdc, 0x11, 0x5f, 0x4a, + 0xb1, 0xe9, 0xd1, 0x03, 0xf1, 0xb5, 0xd7, 0xf4, 0xe8, 0x41, 0xf1, 0xea, 0xf6, 0x9e, 0x4e, 0x50, + 0xd6, 0x64, 0x91, 0xd0, 0x98, 0x8d, 0xbb, 0x6b, 0x35, 0x3f, 0xd3, 0x7f, 0x24, 0xd9, 0xa4, 0x4d, + 0x22, 0xa8, 0x70, 0x4f, 0xed, 0xcc, 0x7e, 0x33, 0x3b, 0x19, 0xbe, 0x85, 0xe9, 0x9e, 0xa5, 0x29, + 0x0d, 0x65, 0x7c, 0x88, 0x65, 0x3e, 0x0d, 0x93, 0x98, 0xa6, 0x72, 0x96, 0x71, 0x26, 0xd9, 0xac, + 0x02, 0xe5, 0x8f, 0xaf, 0x38, 0xef, 0x57, 0x07, 0x7a, 0x5b, 0x12, 0xee, 0xa9, 0x44, 0x17, 0x60, + 0xc8, 0x3c, 0xa3, 0xae, 0x7e, 0xa9, 0x8f, 0xcf, 0xe6, 0x43, 0xbf, 0xa4, 0x1f, 0xf2, 0x8c, 0x62, + 0x75, 0x80, 0xae, 0x60, 0x18, 0xc5, 0x24, 0xc1, 0xf4, 0xdb, 0x77, 0x2a, 0xa4, 0xdb, 0xb9, 0xd4, + 0xc7, 0xc3, 0xb9, 0xed, 0x07, 0x35, 0xb7, 0xd6, 0x70, 0x73, 0x04, 0x5d, 0x83, 0x5d, 0x42, 0x91, + 0xb1, 0x54, 0x50, 0xb7, 0xab, 0x24, 0xa3, 0x4a, 0x52, 0x92, 0x6b, 0x0d, 0xb7, 0x86, 0xd0, 0x0b, + 0x30, 0x22, 0x22, 0x89, 0x6b, 0xa8, 0x61, 0xd3, 0x0f, 0x88, 0x24, 0x6b, 0x0d, 0x2b, 0xb2, 0x70, + 0x0c, 0x13, 0x26, 0xe8, 0x31, 0x84, 0x59, 0x39, 0x2e, 0x1b, 0x64, 0xe1, 0xd8, 0x1c, 0x42, 0x6f, + 0x60, 0x54, 0xe1, 0x2a, 0x47, 0x4f, 0xa9, 0xce, 0x8e, 0xaa, 0x53, 0x90, 0xf6, 0x18, 0x9a, 0x80, + 0xa5, 0x88, 0x22, 0xae, 0xdb, 0x57, 0x1a, 0x28, 0x35, 0x05, 0xb3, 0xd6, 0x70, 0x7d, 0x7c, 0x63, + 0x41, 0x3f, 0x23, 0x79, 0xc2, 0x48, 0xe4, 0x7d, 0x80, 0x61, 0xa3, 0x13, 0x74, 0x0e, 0x03, 0xd5, + 0x75, 0xc8, 0x12, 0xd5, 0xad, 0x85, 0x4f, 0x18, 0xb9, 0xd0, 0x27, 0x51, 0xc4, 0xa9, 0x10, 0xaa, + 0x4e, 0x0b, 0x1f, 0x21, 
0x7a, 0x0e, 0x3d, 0x4e, 0xd2, 0x88, 0x7d, 0x55, 0xa5, 0x75, 0x71, 0x85, + 0xbc, 0x47, 0xb0, 0x9b, 0xed, 0xa1, 0x67, 0x60, 0x52, 0xce, 0x19, 0xaf, 0xac, 0x4b, 0x80, 0x5e, + 0x82, 0x15, 0x96, 0x7b, 0x70, 0x17, 0x28, 0xe7, 0x2e, 0xae, 0x89, 0xbf, 0x7a, 0xbf, 0x06, 0xbb, + 0xd9, 0x63, 0xdb, 0x45, 0xff, 0xc3, 0xc5, 0x5b, 0xc2, 0xa8, 0xd5, 0xdf, 0x53, 0xa2, 0x78, 0xaf, + 0xc0, 0x3a, 0x15, 0xda, 0xc8, 0xa5, 0xb7, 0x72, 0xbd, 0x03, 0xa3, 0x58, 0x82, 0x7f, 0xe7, 0xa9, + 0xaf, 0xef, 0x34, 0xaf, 0x47, 0xd5, 0x36, 0x15, 0x5f, 0x6a, 0x97, 0x4b, 0x34, 0xf9, 0x08, 0x50, + 0x2f, 0x37, 0xb2, 0x61, 0x10, 0xdc, 0x2d, 0x36, 0x9f, 0xf0, 0xea, 0xbd, 0xa3, 0xd5, 0x68, 0xb7, + 0x75, 0x74, 0x34, 0x02, 0x6b, 0xb9, 0xb9, 0xdf, 0xad, 0xd4, 0x61, 0xa7, 0x01, 0x77, 0x5b, 0xa7, + 0x8b, 0x06, 0x60, 0x04, 0x8b, 0x87, 0x85, 0x63, 0x9c, 0x54, 0xcb, 0xcd, 0xce, 0x31, 0x27, 0x0e, + 0x98, 0x2b, 0x75, 0x79, 0x1f, 0xba, 0xab, 0xfb, 0x5b, 0x47, 0x9b, 0xcf, 0xc0, 0xde, 0x72, 0xf6, + 0x33, 0xdf, 0x51, 0x7e, 0x88, 0x43, 0x8a, 0x2e, 0xc0, 0x54, 0x18, 0xf5, 0xab, 0x67, 0x76, 0x7e, + 0xfc, 0xe3, 0x69, 0x63, 0xfd, 0x4a, 0xbf, 0xb9, 0x7d, 0x0c, 0x44, 0xfc, 0x45, 0xf8, 0xfb, 0xb7, + 0xc2, 0x8f, 0xd9, 0x8c, 0x64, 0xb1, 0xa0, 0xfc, 0x40, 0xf9, 0x34, 0xa5, 0xf2, 0x07, 0xe3, 0xfb, + 0x69, 0x56, 0xc8, 0x67, 0xff, 0x7b, 0xec, 0x9f, 0x7b, 0x0a, 0x5d, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x38, 0x1b, 0xf6, 0x4f, 0x17, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto index 3aadac064..0c4cff8cd 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto @@ -29,6 +29,7 @@ enum PacketType { CLOSE_REQ = 2; CLOSE_RSP = 3; DATA = 4; + DIAL_CLS = 5; } enum Error { @@ -45,6 +46,7 @@ message Packet { Data data = 4; CloseRequest closeRequest = 5; CloseResponse closeResponse = 6; + CloseDial closeDial = 7; } } @@ -83,6 +85,11 @@ message CloseResponse { int64 connectID = 2; } +message CloseDial { + // random id of the DialRequest + int64 random = 1; +} + message Data { // connectID to connect to int64 connectID = 1; diff --git a/vendor/sigs.k8s.io/json/CONTRIBUTING.md b/vendor/sigs.k8s.io/json/CONTRIBUTING.md new file mode 100644 index 000000000..3bcd26689 --- /dev/null +++ b/vendor/sigs.k8s.io/json/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Criteria for adding code here + +This library adapts the stdlib `encoding/json` decoder to be compatible with +Kubernetes JSON decoding, and is not expected to actively add new features. + +It may be updated with changes from the stdlib `encoding/json` decoder. 
+ +Any code that is added must: +* Have full unit test and benchmark coverage +* Be backward compatible with the existing exposed go API +* Have zero external dependencies +* Preserve existing benchmark performance +* Preserve compatibility with existing decoding behavior of `UnmarshalCaseSensitivePreserveInts()` or `UnmarshalStrict()` +* Avoid use of `unsafe` + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers + +## Community, discussion, contribution, and support + +You can reach the maintainers of this project via the +[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact). + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + diff --git a/vendor/sigs.k8s.io/json/LICENSE b/vendor/sigs.k8s.io/json/LICENSE new file mode 100644 index 000000000..e5adf7f0c --- /dev/null +++ b/vendor/sigs.k8s.io/json/LICENSE @@ -0,0 +1,238 @@ +Files other than internal/golang/* licensed under: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +------------------ + +internal/golang/* files licensed under: + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile new file mode 100644 index 000000000..07b8bfa85 --- /dev/null +++ b/vendor/sigs.k8s.io/json/Makefile @@ -0,0 +1,35 @@ +.PHONY: default build test benchmark fmt vet + +default: build + +build: + go build ./... + +test: + go test sigs.k8s.io/json/... + +benchmark: + go test sigs.k8s.io/json -bench . -benchmem + +fmt: + go mod tidy + gofmt -s -w *.go + +vet: + go vet sigs.k8s.io/json + + @echo "checking for external dependencies" + @deps=$$(go mod graph); \ + if [ -n "$${deps}" ]; then \ + echo "only stdlib dependencies allowed, found:"; \ + echo "$${deps}"; \ + exit 1; \ + fi + + @echo "checking for unsafe use" + @unsafe=$$(go list -f '{{.ImportPath}} depends on {{.Imports}}' sigs.k8s.io/json/... | grep unsafe || true); \ + if [ -n "$${unsafe}" ]; then \ + echo "no dependencies on unsafe allowed, found:"; \ + echo "$${unsafe}"; \ + exit 1; \ + fi diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS new file mode 100644 index 000000000..0fadafbdd --- /dev/null +++ b/vendor/sigs.k8s.io/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - lavalamp + - liggitt diff --git a/vendor/sigs.k8s.io/json/README.md b/vendor/sigs.k8s.io/json/README.md new file mode 100644 index 000000000..0ef8f51c2 --- /dev/null +++ b/vendor/sigs.k8s.io/json/README.md @@ -0,0 +1,40 @@ +# sigs.k8s.io/json + +[![Go Reference](https://pkg.go.dev/badge/sigs.k8s.io/json.svg)](https://pkg.go.dev/sigs.k8s.io/json) + +## Introduction + +This library is a subproject of [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery#json). +It provides case-sensitive, integer-preserving JSON unmarshaling functions based on `encoding/json` `Unmarshal()`. + +## Compatibility + +The `UnmarshalCaseSensitivePreserveInts()` function behaves like `encoding/json#Unmarshal()` with the following differences: + +- JSON object keys are treated case-sensitively. + Object keys must exactly match json tag names (for tagged struct fields) + or struct field names (for untagged struct fields). +- JSON integers are unmarshaled into `interface{}` fields as an `int64` instead of a + `float64` when possible, falling back to `float64` on any parse or overflow error. +- Syntax errors do not return an `encoding/json` `*SyntaxError` error. + Instead, they return an error which can be passed to `SyntaxErrorOffset()` to obtain an offset. 
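As a rough illustration of the behavior described above, here is a minimal usage sketch (not part of the vendored package). It assumes the exported helpers keep the shapes implied by this README, i.e. `UnmarshalCaseSensitivePreserveInts(data, &v) error` and `UnmarshalStrict(data, &v) ([]error, error)` with the slice carrying the non-fatal strict errors; the `Config` type and sample JSON are hypothetical.

```go
package main

import (
	"fmt"

	kjson "sigs.k8s.io/json"
)

// Config is a hypothetical target type for the examples below.
type Config struct {
	Name     string      `json:"name"`
	Replicas interface{} `json:"replicas"`
}

func main() {
	var c Config

	// Keys must match the json tags case-sensitively, and the untyped
	// "replicas" value decodes into the interface{} field as int64(3)
	// rather than float64(3).
	if err := kjson.UnmarshalCaseSensitivePreserveInts([]byte(`{"name":"a","replicas":3}`), &c); err != nil {
		fmt.Println("decode error:", err)
	}
	fmt.Printf("%T\n", c.Replicas) // int64

	// UnmarshalStrict decodes identically, but additionally surfaces
	// duplicate and unknown fields as non-fatal strict errors.
	strictErrs, err := kjson.UnmarshalStrict([]byte(`{"name":"a","name":"b","extra":true}`), &c)
	if err != nil {
		fmt.Println("decode error:", err)
	}
	for _, se := range strictErrs {
		fmt.Println("strict error:", se)
	}
}
```

Both calls decode successfully; only the strict variant reports the duplicate and unknown fields.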
+ +## Additional capabilities + +The `UnmarshalStrict()` function decodes identically to `UnmarshalCaseSensitivePreserveInts()`, +and also returns non-fatal strict errors encountered while decoding: + +- Duplicate fields encountered +- Unknown fields encountered + +### Community, discussion, contribution, and support + +You can reach the maintainers of this project via the +[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact). + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +[owners]: https://git.k8s.io/community/contributors/guide/owners.md +[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE diff --git a/vendor/sigs.k8s.io/json/SECURITY.md b/vendor/sigs.k8s.io/json/SECURITY.md new file mode 100644 index 000000000..2083d44cd --- /dev/null +++ b/vendor/sigs.k8s.io/json/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Security Announcements + +Join the [kubernetes-security-announce] group for security and vulnerability announcements. + +You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. + +## Reporting a Vulnerability + +Instructions for reporting a vulnerability can be found on the +[Kubernetes Security and Disclosure Information] page. + +## Supported Versions + +Information about supported Kubernetes versions can be found on the +[Kubernetes version and version skew support policy] page on the Kubernetes website. + +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/sigs.k8s.io/json/SECURITY_CONTACTS b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS new file mode 100644 index 000000000..6a51db2fe --- /dev/null +++ b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +deads2k +lavalamp +liggitt diff --git a/vendor/sigs.k8s.io/json/code-of-conduct.md b/vendor/sigs.k8s.io/json/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/sigs.k8s.io/json/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/json/doc.go b/vendor/sigs.k8s.io/json/doc.go new file mode 100644 index 000000000..050eebb03 --- /dev/null +++ b/vendor/sigs.k8s.io/json/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json // import "sigs.k8s.io/json" diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go new file mode 100644 index 000000000..a047d981b --- /dev/null +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -0,0 +1,1402 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. 
+// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + + for _, opt := range opts { + opt(&d) + } + + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +/* +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} +*/ + +func (d *decodeState) unmarshal(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + if d.savedError != nil { + return d.savedError + } + if len(d.savedStrictErrors) > 0 { + return &UnmarshalStrictError{Errors: d.savedStrictErrors} + } + return nil +} + +/* +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} +*/ + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + + savedStrictErrors []error + seenStrictErrors map[string]struct{} + + caseSensitive bool + + preserveInts bool + + disallowDuplicateFields bool +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. 
+ start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + var checkDuplicateField func(fieldNameIndex int, fieldName string) + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
+ switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + + if d.disallowDuplicateFields { + var seenKeys map[string]struct{} + checkDuplicateField = func(fieldNameIndex int, fieldName string) { + if seenKeys == nil { + seenKeys = map[string]struct{}{} + } + if _, seen := seenKeys[fieldName]; seen { + d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName)) + } else { + seenKeys[fieldName] = struct{}{} + } + } + } + + case reflect.Struct: + fields = cachedTypeFields(t) + + if d.disallowDuplicateFields { + if len(fields.list) <= 64 { + // bitset by field index for structs with <= 64 fields + var seenKeys uint64 + checkDuplicateField = func(fieldNameIndex int, fieldName string) { + if seenKeys&(1< '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val interface{}) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. 
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		d.scanWhile(scanSkipSpace)
+		if d.opcode == scanEndArray {
+			break
+		}
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		if d.opcode == scanSkipSpace {
+			d.scanWhile(scanSkipSpace)
+		}
+		if d.opcode == scanEndArray {
+			break
+		}
+		if d.opcode != scanArrayValue {
+			panic(phasePanicMsg)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+	m := make(map[string]interface{})
+	for {
+		// Read opening " of string key or closing }.
+		d.scanWhile(scanSkipSpace)
+		if d.opcode == scanEndObject {
+			// closing } - can only happen on first iteration.
+			break
+		}
+		if d.opcode != scanBeginLiteral {
+			panic(phasePanicMsg)
+		}
+
+		// Read string key.
+		start := d.readIndex()
+		d.rescanLiteral()
+		item := d.data[start:d.readIndex()]
+		key, ok := unquote(item)
+		if !ok {
+			panic(phasePanicMsg)
+		}
+
+		// Read : before value.
+		if d.opcode == scanSkipSpace {
+			d.scanWhile(scanSkipSpace)
+		}
+		if d.opcode != scanObjectKey {
+			panic(phasePanicMsg)
+		}
+		d.scanWhile(scanSkipSpace)
+
+		if d.disallowDuplicateFields {
+			if _, exists := m[key]; exists {
+				d.saveStrictError(fmt.Errorf("duplicate field %q", key))
+			}
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		if d.opcode == scanSkipSpace {
+			d.scanWhile(scanSkipSpace)
+		}
+		if d.opcode == scanEndObject {
+			break
+		}
+		if d.opcode != scanObjectValue {
+			panic(phasePanicMsg)
+		}
+	}
+	return m
+}
+
+// literalInterface consumes and returns a literal from d.data[d.off-1:] and
+// it reads the following byte ahead. The first byte of the literal has been
+// read already (that's how the caller knows it's a literal).
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.readIndex()
+	d.rescanLiteral()
+
+	item := d.data[start:d.readIndex()]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			panic(phasePanicMsg)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			panic(phasePanicMsg)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	var r rune
+	for _, c := range s[2:6] {
+		switch {
+		case '0' <= c && c <= '9':
+			c = c - '0'
+		case 'a' <= c && c <= 'f':
+			c = c - 'a' + 10
+		case 'A' <= c && c <= 'F':
+			c = c - 'A' + 10
+		default:
+			return -1
+		}
+		r = r*16 + rune(c)
+	}
+	return r
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room? Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
new file mode 100644
index 000000000..e473e615a
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -0,0 +1,1419 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 7159. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// So that the JSON will be safe to embed inside HTML