Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits: deschedule...release-1. (49 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e362adf8df | |
| | 55b5191b63 | |
| | 90e7accd57 | |
| | 708b734ad6 | |
| | ddd7cd1c13 | |
| | 0fcedb6e96 | |
| | 6eb44a1313 | |
| | 308309d6d2 | |
| | c8d4658277 | |
| | 6f0cd50dc0 | |
| | 89f453ebb3 | |
| | 783304f248 | |
| | caafe96ce7 | |
| | 2a05825b38 | |
| | 2fee121df3 | |
| | c3424337c8 | |
| | e217952707 | |
| | f4977d1518 | |
| | 96c1c299eb | |
| | ccff5fe155 | |
| | 289c003f87 | |
| | c4ab2008e4 | |
| | 19aa8a234c | |
| | bf2bd73f64 | |
| | 704a82bcf4 | |
| | a01322a5e5 | |
| | 30aec81951 | |
| | d8159c571d | |
| | c59a0f0951 | |
| | 999dd3dc25 | |
| | a243681ff8 | |
| | 73eb42467a | |
| | b8eec7a459 | |
| | a7d8e69820 | |
| | 3bd9dfc625 | |
| | dca2e58b8e | |
| | 267efb3a4b | |
| | c0d60196a1 | |
| | 2e1008e290 | |
| | 714a347692 | |
| | af4b32c6ba | |
| | c602d1256f | |
| | a592cbc417 | |
| | c557d187ab | |
| | e8427d0eb4 | |
| | c0f9761a61 | |
| | e1dc63bcc7 | |
| | 8a458e0dcb | |
| | 99246cd254 | |
.github/workflows/manifests.yaml (2 changes, vendored)

@@ -8,7 +8,7 @@ jobs:
     strategy:
       matrix:
         k8s-version: ["v1.28.0"]
-        descheduler-version: ["v0.28.0"]
+        descheduler-version: ["v0.28.1"]
         descheduler-api: ["v1alpha1", "v1alpha2"]
         manifest: ["deployment"]
     runs-on: ubuntu-latest
@@ -1,5 +1,5 @@
 run:
-  deadline: 2m
+  timeout: 2m

 linters:
   disable-all: true
@@ -15,4 +15,4 @@ linters-settings:
   gofumpt:
     extra-rules: true
   goimports:
-    local-prefixes: sigs.k8s.io/descheduler
+    local-prefixes: sigs.k8s.io/descheduler
OWNERS (1 change)

@@ -12,6 +12,7 @@ reviewers:
 - a7i
 - janeliul
 - knelasevero
+- jklaw90
 emeritus_approvers:
 - aveshagarwal
 - k82cn
README.md (20 changes)

@@ -381,10 +381,9 @@ profiles:
           "memory": 20
           "pods": 20
-        evictableNamespaces:
-          exclude:
-            - "kube-system"
-            - "namespace1"
+        namespaces:
+          exclude:
+            - "kube-system"
+            - "namespace1"
     plugins:
       balance:
         enabled:

@@ -548,6 +547,19 @@ topologyBalanceNodeFit: false

 Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.

+[Supported Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#spread-constraint-definition) fields:
+
+|Name|Supported?|
+|----|----------|
+|`maxSkew`|Yes|
+|`minDomains`|No|
+|`topologyKey`|Yes|
+|`whenUnsatisfiable`|Yes|
+|`labelSelector`|Yes|
+|`matchLabelKeys`|Yes|
+|`nodeAffinityPolicy`|Yes|
+|`nodeTaintsPolicy`|Yes|
+
 **Parameters:**

 |Name|Type|
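As a quick illustration of the documentation added above, a minimal v1alpha2 profile for the topology-spread plugin might look like the sketch below. This is only a sketch: the profile name and argument values are examples, and the plugin is assumed to be the `RemovePodsViolatingTopologySpreadConstraint` balance plugin that this README section documents.

```yaml
# Illustrative DeschedulerPolicy sketch (not part of the diff above).
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: topology-spread-example        # example profile name
    pluginConfig:
      - name: "RemovePodsViolatingTopologySpreadConstraint"
        args:
          topologyBalanceNodeFit: false   # mirrors the example value shown above
    plugins:
      balance:
        enabled:
          - "RemovePodsViolatingTopologySpreadConstraint"
```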
@@ -1,14 +1,14 @@
 apiVersion: v1
 name: descheduler
-version: 0.28.0
-appVersion: 0.28.0
+version: 0.28.1
+appVersion: 0.28.1
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes
 - descheduler
 - kube-scheduler
 home: https://github.com/kubernetes-sigs/descheduler
-icon: https://kubernetes.io/images/favicon.png
+icon: https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/assets/logo/descheduler-stacked-color.png
 sources:
 - https://github.com/kubernetes-sigs/descheduler
 maintainers:
@@ -55,6 +55,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
 | `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
+| `timeZone` | configure `timeZone` for CronJob | `nil` |
 | `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
 | `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
 | `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
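The new `timeZone` entry sits alongside the existing CronJob parameters in the table above. A minimal sketch of a values file using them follows; the chart's `kind` switch and the concrete values shown are illustrative, not defaults taken from the diff.

```yaml
# Illustrative values.yaml fragment for running the descheduler chart as a CronJob.
kind: CronJob
schedule: "*/2 * * * *"
timeZone: "Etc/UTC"               # new in 0.28.1; forwarded to the CronJob spec when set
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
ttlSecondsAfterFinished: 600
```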
@@ -21,6 +21,9 @@ spec:
   {{- if .Values.failedJobsHistoryLimit }}
   failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
   {{- end }}
+  {{- if .Values.timeZone }}
+  timeZone: {{ .Values.timeZone }}
+  {{- end }}
   jobTemplate:
     spec:
       {{- if .Values.ttlSecondsAfterFinished }}
@@ -48,6 +51,10 @@ spec:
             affinity:
               {{- toYaml . | nindent 12 }}
           {{- end }}
+          {{- if .Values.dnsConfig }}
+          dnsConfig:
+            {{- .Values.dnsConfig | toYaml | nindent 12 }}
+          {{- end }}
           {{- with .Values.tolerations }}
           tolerations:
             {{- toYaml . | nindent 12 }}
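With the template change above, a rendered CronJob would carry the new field roughly in this position; this is a sketch of the output shape only, assuming `timeZone` is set in values and omitting the rest of the manifest.

```yaml
# Illustrative rendered output (shape only, not a complete manifest).
apiVersion: batch/v1
kind: CronJob
spec:
  schedule: "*/2 * * * *"
  timeZone: "Etc/UTC"        # emitted by the new timeZone block when the value is set
  jobTemplate:
    spec: {}                 # job template omitted for brevity
```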
@@ -31,6 +31,10 @@ spec:
         {{- .Values.podAnnotations | toYaml | nindent 8 }}
         {{- end }}
     spec:
+      {{- if .Values.dnsConfig }}
+      dnsConfig:
+        {{- .Values.dnsConfig | toYaml | nindent 8 }}
+      {{- end }}
       {{- if .Values.priorityClassName }}
       priorityClassName: {{ .Values.priorityClassName }}
       {{- end }}
@@ -45,6 +45,7 @@ suspend: false
 # successfulJobsHistoryLimit: 3
 # failedJobsHistoryLimit: 1
 # ttlSecondsAfterFinished 600
+# timeZone: Etc/UTC

 # Required when running as a Deployment
 deschedulingInterval: 5m
@@ -171,6 +172,8 @@ podAnnotations: {}

 podLabels: {}

+dnsConfig: {}
+
 livenessProbe:
   failureThreshold: 3
   httpGet:
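Because the new `dnsConfig` value is copied verbatim into the pod spec by the template changes above, any standard PodSpec `dnsConfig` block can be supplied. A small illustrative example (the option and its value are only an example):

```yaml
# Illustrative: custom pod DNS settings passed through the new dnsConfig value.
dnsConfig:
  options:
    - name: ndots
      value: "2"
```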
@@ -44,6 +44,7 @@ type DeschedulerServer struct {
     EventClient    clientset.Interface
     SecureServing  *apiserveroptions.SecureServingOptionsWithLoopback
     DisableMetrics bool
+    EnableHTTP2    bool
 }

 // NewDeschedulerServer creates a new DeschedulerServer with default parameters

@@ -86,7 +87,6 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err

 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
@@ -101,6 +101,7 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
@@ -20,7 +20,6 @@ package app
 import (
     "context"
     "io"
-    "os"
     "os/signal"
     "syscall"
@@ -32,12 +31,14 @@ import (

     "github.com/spf13/cobra"

     "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/watch"
     apiserver "k8s.io/apiserver/pkg/server"
     "k8s.io/apiserver/pkg/server/mux"
     restclient "k8s.io/client-go/rest"
-    registry "k8s.io/component-base/logs/api/v1"
-    jsonLog "k8s.io/component-base/logs/json"
+    "k8s.io/component-base/featuregate"
+    "k8s.io/component-base/logs"
+    logsapi "k8s.io/component-base/logs/api/v1"
+    _ "k8s.io/component-base/logs/json/register"
     "k8s.io/component-base/metrics/legacyregistry"
     "k8s.io/klog/v2"
@@ -50,36 +51,31 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
         klog.ErrorS(err, "unable to initialize server")
     }

+    featureGate := featuregate.NewFeatureGate()
+    logConfig := logsapi.NewLoggingConfiguration()
+
     cmd := &cobra.Command{
         Use:   "descheduler",
         Short: "descheduler",
-        Long:  `The descheduler evicts pods which may be bound to less desired nodes`,
-        Run: func(cmd *cobra.Command, args []string) {
-            // s.Logs.Config.Format = s.Logging.Format
-
-            // LoopbackClientConfig is a config for a privileged loopback connection
-            var LoopbackClientConfig *restclient.Config
-            var SecureServing *apiserver.SecureServingInfo
-            if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
+        Long:  "The descheduler evicts pods which may be bound to less desired nodes",
+        PreRunE: func(cmd *cobra.Command, args []string) error {
+            logs.InitLogs()
+            if err := logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
+                return err
+            }
+            descheduler.SetupPlugins()
+            return nil
+        },
+        RunE: func(cmd *cobra.Command, args []string) error {
+            // loopbackClientConfig is a config for a privileged loopback connection
+            var loopbackClientConfig *restclient.Config
+            var secureServing *apiserver.SecureServingInfo
+            if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
                 klog.ErrorS(err, "failed to apply secure server configuration")
-                return
+                return err
             }

-            var factory registry.LogFormatFactory
-            if s.Logging.Format == "json" {
-                factory = jsonLog.Factory{}
-            }
-
-            if factory == nil {
-                klog.ClearLogger()
-            } else {
-                log, loggerControl := factory.Create(registry.LoggingConfiguration{
-                    Format:    s.Logging.Format,
-                    Verbosity: s.Logging.Verbosity,
-                }, registry.LoggingOptions{})
-                defer loggerControl.Flush()
-                klog.SetLogger(log)
-            }
+            secureServing.DisableHTTP2 = !s.EnableHTTP2

             ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
@@ -90,25 +86,31 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {

             healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

-            stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
+            stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
             if err != nil {
                 klog.Fatalf("failed to start secure server: %v", err)
-                return
+                return err
             }

-            err = Run(ctx, s)
-            if err != nil {
+            if err = Run(ctx, s); err != nil {
                 klog.ErrorS(err, "descheduler server")
+                return err
             }

             done()
             // wait for metrics server to close
             <-stoppedCh
+
+            return nil
         },
     }
     cmd.SetOut(out)
     flags := cmd.Flags()
     s.AddFlags(flags)

+    runtime.Must(logsapi.AddFeatureGates(featureGate))
+    logsapi.AddFlags(logConfig, flags)

     return cmd
 }
@@ -123,8 +125,3 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
     watch.DefaultChanSize = 100000
     return descheduler.Run(ctx, rs)
 }
-
-func SetupLogs() {
-    klog.SetOutput(os.Stdout)
-    klog.InitFlags(nil)
-}
@@ -21,14 +21,8 @@ import (

     "k8s.io/component-base/cli"
     "sigs.k8s.io/descheduler/cmd/descheduler/app"
-    "sigs.k8s.io/descheduler/pkg/descheduler"
 )

-func init() {
-    app.SetupLogs()
-    descheduler.SetupPlugins()
-}
-
 func main() {
     out := os.Stdout
     cmd := app.NewDeschedulerCommand(out)
@@ -21,6 +21,7 @@ descheduler [flags]
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
@@ -31,7 +32,10 @@ descheduler [flags]
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
--otel-sample-rate float Sample rate to collect the Traces (default 1)
@@ -49,6 +53,8 @@ descheduler [flags]
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule pattern=N,... comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```
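In a cluster these flags are typically supplied as container args; below is a short, illustrative fragment that uses only flags listed above (the image tag comes from this release, the values are examples).

```yaml
# Illustrative container fragment; flag names are taken from the help text above.
containers:
  - name: descheduler
    image: registry.k8s.io/descheduler/descheduler:v0.28.1
    args:
      - --descheduling-interval=5m
      - --logging-format=json
      - --enable-http2=false
      - --v=3
```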
### SEE ALSO
@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.28.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.28.1' | kubectl apply -f -
```

Run As A CronJob
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.28.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.28.1' | kubectl apply -f -
```

Run As A Deployment
```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.28.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.28.1' | kubectl apply -f -
```

## User Guide
@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the

 Descheduler Version | Container Image                                 | Architectures           |
 ------------------- |-------------------------------------------------|-------------------------|
+v0.28.1             | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
 v0.28.0             | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.27.1             | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
 v0.27.0             | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
go.mod (79 changes)
@@ -4,24 +4,24 @@ go 1.20

 require (
     github.com/client9/misspell v0.3.4
-    github.com/google/go-cmp v0.5.9
-    github.com/spf13/cobra v1.7.0
+    github.com/google/go-cmp v0.6.0
+    github.com/spf13/cobra v1.8.0
     github.com/spf13/pflag v1.0.5
-    go.opentelemetry.io/otel v1.10.0
-    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0
-    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0
-    go.opentelemetry.io/otel/sdk v1.10.0
-    go.opentelemetry.io/otel/trace v1.10.0
-    google.golang.org/grpc v1.54.0
-    k8s.io/api v0.28.0
-    k8s.io/apimachinery v0.28.0
-    k8s.io/apiserver v0.28.0
-    k8s.io/client-go v0.28.0
-    k8s.io/code-generator v0.28.0
-    k8s.io/component-base v0.28.0
-    k8s.io/component-helpers v0.28.0
-    k8s.io/klog/v2 v2.100.1
-    k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
+    go.opentelemetry.io/otel v1.21.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
+    go.opentelemetry.io/otel/sdk v1.21.0
+    go.opentelemetry.io/otel/trace v1.21.0
+    google.golang.org/grpc v1.59.0
+    k8s.io/api v0.28.4
+    k8s.io/apimachinery v0.28.4
+    k8s.io/apiserver v0.28.4
+    k8s.io/client-go v0.28.4
+    k8s.io/code-generator v0.28.4
+    k8s.io/component-base v0.28.4
+    k8s.io/component-helpers v0.28.4
+    k8s.io/klog/v2 v2.110.1
+    k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf
     sigs.k8s.io/mdtoc v1.1.0
 )
@@ -36,13 +36,13 @@ require (
     github.com/cespare/xxhash/v2 v2.2.0 // indirect
     github.com/coreos/go-semver v0.3.1 // indirect
     github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-    github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+    github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/emicklei/go-restful/v3 v3.9.0 // indirect
     github.com/evanphx/json-patch v5.6.0+incompatible // indirect
     github.com/felixge/httpsnoop v1.0.3 // indirect
     github.com/fsnotify/fsnotify v1.6.0 // indirect
-    github.com/go-logr/logr v1.2.4 // indirect
+    github.com/go-logr/logr v1.3.0 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
     github.com/go-logr/zapr v1.2.3 // indirect
     github.com/go-openapi/jsonpointer v0.19.6 // indirect
@@ -52,12 +52,12 @@ require (
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
     github.com/golang/protobuf v1.5.3 // indirect
     github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
-    github.com/google/cel-go v0.16.0 // indirect
+    github.com/google/cel-go v0.16.1 // indirect
     github.com/google/gnostic-models v0.6.8 // indirect
     github.com/google/gofuzz v1.2.0 // indirect
-    github.com/google/uuid v1.3.0 // indirect
+    github.com/google/uuid v1.3.1 // indirect
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-    github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+    github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
     github.com/imdario/mergo v0.3.6 // indirect
     github.com/inconshreveable/mousetrap v1.1.0 // indirect
     github.com/josharian/intern v1.0.0 // indirect
@@ -78,39 +78,40 @@ require (
     go.etcd.io/etcd/api/v3 v3.5.9 // indirect
     go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
     go.etcd.io/etcd/client/v3 v3.5.9 // indirect
-    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
-    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
-    go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
-    go.opentelemetry.io/otel/metric v0.31.0 // indirect
-    go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
+    go.opentelemetry.io/otel/metric v1.21.0 // indirect
+    go.opentelemetry.io/proto/otlp v1.0.0 // indirect
     go.uber.org/atomic v1.10.0 // indirect
     go.uber.org/multierr v1.11.0 // indirect
     go.uber.org/zap v1.19.0 // indirect
-    golang.org/x/crypto v0.11.0 // indirect
+    golang.org/x/crypto v0.15.0 // indirect
     golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
     golang.org/x/mod v0.10.0 // indirect
-    golang.org/x/net v0.13.0 // indirect
-    golang.org/x/oauth2 v0.8.0 // indirect
-    golang.org/x/sync v0.2.0 // indirect
-    golang.org/x/sys v0.10.0 // indirect
-    golang.org/x/term v0.10.0 // indirect
-    golang.org/x/text v0.11.0 // indirect
+    golang.org/x/net v0.18.0 // indirect
+    golang.org/x/oauth2 v0.11.0 // indirect
+    golang.org/x/sync v0.3.0 // indirect
+    golang.org/x/sys v0.14.0 // indirect
+    golang.org/x/term v0.14.0 // indirect
+    golang.org/x/text v0.14.0 // indirect
     golang.org/x/time v0.3.0 // indirect
     golang.org/x/tools v0.8.0 // indirect
     google.golang.org/appengine v1.6.7 // indirect
-    google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
-    google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
-    google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
-    google.golang.org/protobuf v1.30.0 // indirect
+    google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
+    google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+    google.golang.org/protobuf v1.31.0 // indirect
     gopkg.in/inf.v0 v0.9.1 // indirect
     gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
     k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
-    k8s.io/kms v0.28.0 // indirect
+    k8s.io/kms v0.28.4 // indirect
     k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
     sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
     sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
     sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
     sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
go.sum (516 changes)
@@ -1,46 +1,10 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
|
||||
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
@@ -53,29 +17,17 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@@ -83,28 +35,17 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
|
||||
@@ -120,90 +61,39 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/cel-go v0.16.0 h1:DG9YQ8nFCFXAs/FDDwBxmL1tpKNrdlGUM9U3537bX/Y=
|
||||
github.com/google/cel-go v0.16.0/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
|
||||
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@@ -213,8 +103,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -247,23 +135,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
|
||||
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
@@ -278,12 +162,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
|
||||
@@ -296,37 +178,29 @@ go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQa
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
|
||||
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
|
||||
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
|
||||
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
|
||||
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
|
||||
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
|
||||
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
@@ -334,186 +208,54 @@ go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
|
||||
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
|
||||
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
|
||||
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
||||
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
|
||||
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
@@ -521,112 +263,30 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
|
||||
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
@@ -634,41 +294,31 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.28.0 h1:3j3VPWmN9tTDI68NETBWlDiA9qOiGJ7sdKeufehBYsM=
|
||||
k8s.io/api v0.28.0/go.mod h1:0l8NZJzB0i/etuWnIXcwfIv+xnDOhL3lLW919AWYDuY=
|
||||
k8s.io/apimachinery v0.28.0 h1:ScHS2AG16UlYWk63r46oU3D5y54T53cVI5mMJwwqFNA=
|
||||
k8s.io/apimachinery v0.28.0/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
|
||||
k8s.io/apiserver v0.28.0 h1:wVh7bK6Xj7hq+5ntInysTeQRAOqqFoKGUOW2yj8DXrY=
|
||||
k8s.io/apiserver v0.28.0/go.mod h1:MvLmtxhQ0Tb1SZk4hfJBjs8iqr5nhYeaFSaoEcz7Lk4=
|
||||
k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM=
|
||||
k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc=
|
||||
k8s.io/code-generator v0.28.0 h1:msdkRVJNVFgdiIJ8REl/d3cZsMB9HByFcWMmn13NyuE=
|
||||
k8s.io/code-generator v0.28.0/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A=
|
||||
k8s.io/component-base v0.28.0 h1:HQKy1enJrOeJlTlN4a6dU09wtmXaUvThC0irImfqyxI=
|
||||
k8s.io/component-base v0.28.0/go.mod h1:Yyf3+ZypLfMydVzuLBqJ5V7Kx6WwDr/5cN+dFjw1FNk=
|
||||
k8s.io/component-helpers v0.28.0 h1:ubHUiEF7H/DOx4471pHHsLlH3EGu8jlEvnld5PS4KdI=
|
||||
k8s.io/component-helpers v0.28.0/go.mod h1:i7hJ/oFhZImqUWwjLFG/yGkLpJ3KFoirY2DLYIMql6Q=
|
||||
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
|
||||
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
|
||||
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
|
||||
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
|
||||
k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg=
|
||||
k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w=
|
||||
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
|
||||
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
|
||||
k8s.io/code-generator v0.28.4 h1:tcOSNIZQvuAvXhOwpbuJkKbAABJQeyCcQBCN/3uI18c=
|
||||
k8s.io/code-generator v0.28.4/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4=
|
||||
k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
|
||||
k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
|
||||
k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4=
|
||||
k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
|
||||
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kms v0.28.0 h1:BwJhU9qPcJhHLUcQjtelOSjYti+1/caJLr+4jHbKzTA=
|
||||
k8s.io/kms v0.28.0/go.mod h1:CNU792ls92v2Ye7Vn1jn+xLqYtUSezDZNVu6PLbJyrU=
|
||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
|
||||
k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg=
|
||||
k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf h1:iTzha1p7Fi83476ypNSz8nV9iR9932jIIs26F7gNLsU=
|
||||
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
|
||||
257
hack/cherry_pick_pull.sh
Executable file
@@ -0,0 +1,257 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md
|
||||
|
||||
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
|
||||
# meta.) Assumes you care about pulls from remote "upstream" and
|
||||
# checks them out to a branch named:
|
||||
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
REPO_ROOT="$(git rev-parse --show-toplevel)"
|
||||
declare -r REPO_ROOT
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
STARTINGBRANCH=$(git symbolic-ref --short HEAD)
|
||||
declare -r STARTINGBRANCH
|
||||
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
|
||||
DRY_RUN=${DRY_RUN:-""}
|
||||
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
|
||||
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
|
||||
FORK_REMOTE=${FORK_REMOTE:-origin}
|
||||
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
|
||||
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}
|
||||
|
||||
if [[ -z ${GITHUB_USER:-} ]]; then
|
||||
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v gh > /dev/null; then
|
||||
echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$#" -lt 2 ]]; then
|
||||
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
|
||||
echo
|
||||
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
|
||||
echo " Examples:"
|
||||
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
|
||||
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
|
||||
echo
|
||||
echo " Set the DRY_RUN environment var to skip git push and creating PR."
|
||||
echo " This is useful for creating patches to a release branch without making a PR."
|
||||
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
|
||||
echo
|
||||
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
|
||||
echo " This is useful when picking commits containing changes to API documentation."
|
||||
echo
|
||||
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
|
||||
echo " to override the default remote names to what you have locally."
|
||||
echo
|
||||
echo " For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Checks if you are logged in. Will error/bail if you are not.
|
||||
gh auth status
|
||||
|
||||
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
|
||||
echo "!!! Dirty tree. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -e "${REBASEMAGIC}" ]]; then
|
||||
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -r BRANCH="$1"
|
||||
shift 1
|
||||
declare -r PULLS=( "$@" )
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
|
||||
declare -r PULLDASH
|
||||
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
|
||||
declare -r PULLSUBJ
|
||||
|
||||
echo "+++ Updating remotes..."
|
||||
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
|
||||
|
||||
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
|
||||
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
|
||||
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
|
||||
declare -r NEWBRANCHREQ
|
||||
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
|
||||
declare -r NEWBRANCH
|
||||
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
|
||||
declare -r NEWBRANCHUNIQ
|
||||
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
|
||||
|
||||
cleanbranch=""
|
||||
gitamcleanup=false
|
||||
function return_to_kansas {
|
||||
if [[ "${gitamcleanup}" == "true" ]]; then
|
||||
echo
|
||||
echo "+++ Aborting in-progress git am."
|
||||
git am --abort >/dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# return to the starting branch and delete the PR text file
|
||||
if [[ -z "${DRY_RUN}" ]]; then
|
||||
echo
|
||||
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
|
||||
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
|
||||
if [[ -n "${cleanbranch}" ]]; then
|
||||
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
trap return_to_kansas EXIT
|
||||
|
||||
SUBJECTS=()
|
||||
function make-a-pr() {
|
||||
local rel
|
||||
rel="$(basename "${BRANCH}")"
|
||||
echo
|
||||
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
|
||||
|
||||
local numandtitle
|
||||
numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
|
||||
prtext=$(cat <<EOF
|
||||
Cherry pick of ${PULLSUBJ} on ${rel}.
|
||||
|
||||
${numandtitle}
|
||||
|
||||
For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.
|
||||
|
||||
\`\`\`release-note
|
||||
|
||||
\`\`\`
|
||||
EOF
|
||||
)
|
||||
|
||||
gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
|
||||
}
|
||||
|
||||
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
|
||||
cleanbranch="${NEWBRANCHUNIQ}"
|
||||
|
||||
gitamcleanup=true
|
||||
for pull in "${PULLS[@]}"; do
|
||||
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
|
||||
|
||||
curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
|
||||
echo
|
||||
echo "+++ About to attempt cherry pick of PR. To reattempt:"
|
||||
echo " $ git am -3 /tmp/${pull}.patch"
|
||||
echo
|
||||
git am -3 "/tmp/${pull}.patch" || {
|
||||
conflicts=false
|
||||
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|
||||
|| [[ -e "${REBASEMAGIC}" ]]; do
|
||||
conflicts=true # <-- We should have detected conflicts once
|
||||
echo
|
||||
echo "+++ Conflicts detected:"
|
||||
echo
|
||||
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
|
||||
echo
|
||||
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
echo
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${conflicts}" != "true" ]]; then
|
||||
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# set the subject
|
||||
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
|
||||
SUBJECTS+=("#${pull}: ${subject}")
|
||||
|
||||
# remove the patch file from /tmp
|
||||
rm -f "/tmp/${pull}.patch"
|
||||
done
|
||||
gitamcleanup=false
|
||||
|
||||
# Re-generate docs (if needed)
|
||||
if [[ -n "${REGENERATE_DOCS}" ]]; then
|
||||
echo
|
||||
echo "Regenerating docs..."
|
||||
if ! hack/generate-docs.sh; then
|
||||
echo
|
||||
echo "hack/generate-docs.sh FAILED to complete."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${DRY_RUN}" ]]; then
|
||||
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
|
||||
echo "To return to the branch you were in when you invoked this script:"
|
||||
echo
|
||||
echo " git checkout ${STARTINGBRANCH}"
|
||||
echo
|
||||
echo "To delete this branch:"
|
||||
echo
|
||||
echo " git branch -D ${NEWBRANCHUNIQ}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
|
||||
echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
|
||||
echo "This isn't normal. Leaving you with push instructions:"
|
||||
echo
|
||||
echo "+++ First manually push the branch this script created:"
|
||||
echo
|
||||
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
|
||||
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
|
||||
echo
|
||||
make-a-pr
|
||||
cleanbranch=""
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
|
||||
echo
|
||||
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
make-a-pr
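For reference, the help text above boils down to invocations like the following; a minimal sketch, where the release branch and PR number are placeholders and `origin` is assumed to be your fork:

```bash
# Prerequisites: gh installed and authenticated, a clean tree,
# and GITHUB_USER pointing at the user/org that owns your fork.
export GITHUB_USER=<your-github-user>

# Cherry-pick PR #1234 onto the release-1.28 branch and open a PR upstream.
hack/cherry_pick_pull.sh upstream/release-1.28 1234

# Dry run: apply the commits locally, skip the push and PR creation.
DRY_RUN=1 hack/cherry_pick_pull.sh upstream/release-1.28 1234
```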
@@ -1,5 +1,9 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
featureGates:
# beta as of 1.27 but we currently run e2e on 1.26
# this flag should be removed as part of Descheduler 0.29 release
MatchLabelKeysInPodTopologySpread: true
nodes:
- role: control-plane
- role: worker
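The gate above only applies to clusters created from this config; a minimal sketch of standing up the e2e cluster with kind, where the config path and node image tag are illustrative placeholders:

```bash
# Create a two-node cluster (control-plane and worker) from the config above.
# The file path and the 1.26 node image tag are placeholders; use your local ones.
kind create cluster --name descheduler-e2e \
  --config ./kind-config.yaml \
  --image kindest/node:v1.26.6
```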
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.1
image: registry.k8s.io/descheduler/descheduler:v0.28.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.1
image: registry.k8s.io/descheduler/descheduler:v0.28.1
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.1
image: registry.k8s.io/descheduler/descheduler:v0.28.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

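The three manifests above pin the image shipped with the release; a minimal sketch of rolling that bump out to an already-running install, assuming the common `kube-system` namespace and a Deployment named `descheduler` (adjust both to your setup):

```bash
# Point the running descheduler at the image referenced in the updated manifests.
kubectl -n kube-system set image deployment/descheduler \
  descheduler=registry.k8s.io/descheduler/descheduler:v0.28.1

# Wait for the new pod to roll out.
kubectl -n kube-system rollout status deployment/descheduler
```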
@@ -21,7 +21,6 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -61,10 +60,6 @@ type DeschedulerConfiguration struct {
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration

// Logging specifies the options of logging.
// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration

// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration

@@ -21,7 +21,6 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -61,10 +60,6 @@ type DeschedulerConfiguration struct {
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

// Logging specifies the options of logging.
// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration `json:"logging,omitempty"`

// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`

@@ -72,7 +72,6 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
return err
}
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
out.ClientConnection = in.ClientConnection
return nil
}
@@ -95,7 +94,6 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
return err
}
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
out.ClientConnection = in.ClientConnection
return nil
}

@@ -31,7 +31,6 @@ func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration)
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Tracing = in.Tracing
|
||||
out.LeaderElection = in.LeaderElection
|
||||
in.Logging.DeepCopyInto(&out.Logging)
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration)
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Tracing = in.Tracing
|
||||
out.LeaderElection = in.LeaderElection
|
||||
in.Logging.DeepCopyInto(&out.Logging)
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return
|
||||
}
|
||||
|
||||
@@ -21,8 +21,6 @@ import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"time"

"go.opentelemetry.io/otel/attribute"
@@ -38,6 +36,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
@@ -277,29 +276,29 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
}

func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
serverVersion, serverErr := discovery.ServerVersion()
if serverErr != nil {
return errors.New("failed to get Kubernetes server version")
serverVersionInfo, err := discovery.ServerVersion()
if err != nil {
return errors.New("failed to discover Kubernetes server version")
}

deschedulerMinorVersion := strings.Split(versionInfo.Minor, ".")[0]
deschedulerMinorVersionFloat, err := strconv.ParseFloat(deschedulerMinorVersion, 64)
serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
if err != nil {
return errors.New("failed to parse Kubernetes server version")
}

deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
if err != nil {
return errors.New("failed to convert Descheduler minor version to float")
}

kubernetesMinorVersionFloat, err := strconv.ParseFloat(serverVersion.Minor, 64)
if err != nil {
return errors.New("failed to convert Kubernetes server minor version to float")
}

if math.Abs(deschedulerMinorVersionFloat-kubernetesMinorVersionFloat) > 3 {
deschedulerMinor := float64(deschedulerVersion.Minor())
serverMinor := float64(serverVersion.Minor())
if math.Abs(deschedulerMinor-serverMinor) > 3 {
return fmt.Errorf(
"descheduler minor version %v is not supported on your version of Kubernetes %v.%v. "+
"descheduler version %v may not be supported on your version of Kubernetes %v."+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
deschedulerMinorVersion,
serverVersion.Major,
serverVersion.Minor,
deschedulerVersion.String(),
serverVersionInfo.String(),
)
}

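The rewritten check compares parsed semantic versions instead of casting minor-version strings to floats, which also tolerates managed-provider suffixes such as `v1.25.12-eks-2d98532`. A minimal standalone sketch of the same skew rule, using `k8s.io/apimachinery/pkg/util/version` (the function name and sample versions below are illustrative, not the descheduler's actual code):

```go
package main

import (
	"fmt"
	"math"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// skewWithinThreeMinors mirrors the compatibility rule above: the descheduler
// and the API server may differ by at most three minor versions.
func skewWithinThreeMinors(deschedulerGitVersion, serverGitVersion string) (bool, error) {
	// ParseGeneric tolerates short versions such as "v0.28".
	d, err := utilversion.ParseGeneric(deschedulerGitVersion)
	if err != nil {
		return false, err
	}
	// ParseSemantic accepts provider suffixes such as "v1.25.12-eks-2d98532".
	s, err := utilversion.ParseSemantic(serverGitVersion)
	if err != nil {
		return false, err
	}
	return math.Abs(float64(d.Minor())-float64(s.Minor())) <= 3, nil
}

func main() {
	ok, _ := skewWithinThreeMinors("v0.28.1", "v1.25.12-eks-2d98532")
	fmt.Println(ok) // true: minors 28 and 25 are within three of each other
}
```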
@@ -250,49 +250,55 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
|
||||
func TestValidateVersionCompatibility(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
deschedulerMinor string
|
||||
serverMinor string
|
||||
expectError bool
|
||||
name string
|
||||
deschedulerVersion string
|
||||
serverVersion string
|
||||
expectError bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "no error when descheduler minor equals to server minor",
|
||||
deschedulerMinor: "26.0",
|
||||
serverMinor: "26",
|
||||
expectError: false,
|
||||
name: "no error when descheduler minor equals to server minor",
|
||||
deschedulerVersion: "v0.26",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 behind server minor",
|
||||
deschedulerMinor: "23.0",
|
||||
serverMinor: "26",
|
||||
expectError: false,
|
||||
name: "no error when descheduler minor is 3 behind server minor",
|
||||
deschedulerVersion: "0.23",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 ahead of server minor",
|
||||
deschedulerMinor: "26.0",
|
||||
serverMinor: "23",
|
||||
expectError: false,
|
||||
name: "no error when descheduler minor is 3 ahead of server minor",
|
||||
deschedulerVersion: "v0.26",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 behind server minor",
|
||||
deschedulerMinor: "22.0",
|
||||
serverMinor: "26",
|
||||
expectError: true,
|
||||
name: "error when descheduler minor is 4 behind server minor",
|
||||
deschedulerVersion: "v0.22",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 ahead of server minor",
|
||||
deschedulerMinor: "27.0",
|
||||
serverMinor: "23",
|
||||
expectError: true,
|
||||
name: "error when descheduler minor is 4 ahead of server minor",
|
||||
deschedulerVersion: "v0.27",
|
||||
serverVersion: "v1.23.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "no error when using managed provider version",
|
||||
deschedulerVersion: "v0.25",
|
||||
serverVersion: "v1.25.12-eks-2d98532",
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
client := fakeclientset.NewSimpleClientset()
|
||||
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeDiscovery.FakedServerVersion = &apiversion.Info{Major: "1", Minor: tc.serverMinor}
|
||||
deschedulerVersion := deschedulerversion.Info{Major: "0", Minor: tc.deschedulerMinor}
|
||||
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
|
||||
deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
|
||||
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
|
||||
|
||||
hasError := err != nil
|
||||
|
||||
@@ -131,7 +131,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
}
@@ -141,7 +141,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
}

@@ -156,11 +156,11 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
if defaultEvictorArgs.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
if err != nil {
klog.ErrorS(fmt.Errorf("Pod fails the following checks"), "pod", klog.KObj(pod))
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
return false
}
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
klog.ErrorS(fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable"), "pod", klog.KObj(pod))
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
return false
}
return true

@@ -18,6 +18,7 @@ package nodeutilization

import (
"context"
"math"
"sort"

"sigs.k8s.io/descheduler/pkg/api"
@@ -163,6 +164,10 @@ func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceNam
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
}

func roundTo2Decimals(percentage float64) float64 {
return math.Round(percentage*100) / 100
}

func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
nodeCapacity := nodeUsage.node.Status.Capacity
if len(nodeUsage.node.Status.Allocatable) > 0 {
@@ -174,6 +179,7 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
cap := nodeCapacity[resourceName]
if !cap.IsZero() {
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
resourceUsagePercentage[resourceName] = roundTo2Decimals(resourceUsagePercentage[resourceName])
}
}

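The new `roundTo2Decimals` helper keeps usage percentages stable when they are compared and logged. A small self-contained illustration of the same rounding applied to a node usage percentage (the numbers are hypothetical, not taken from the strategy):

```go
package main

import (
	"fmt"
	"math"
)

// roundTo2Decimals rounds a percentage to two decimal places, as the
// nodeutilization helper above does before comparing against thresholds.
func roundTo2Decimals(percentage float64) float64 {
	return math.Round(percentage*100) / 100
}

func main() {
	// e.g. 1337 millicores used out of a 3000 millicore node capacity
	usage, capacity := 1337.0, 3000.0
	pct := 100 * usage / capacity
	fmt.Println(pct)                   // ≈ 44.5667
	fmt.Println(roundTo2Decimals(pct)) // 44.57
}
```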
@@ -57,6 +57,10 @@ type podOwner struct {
imagesHash string
}

func (po podOwner) String() string {
return fmt.Sprintf("%s/%s/%s/%s", po.namespace, po.kind, po.name, po.imagesHash)
}

// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)

@@ -28,6 +28,8 @@ import (
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"

v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,6 +50,19 @@ type topology struct {
pods []*v1.Pod
}

// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint
// and where the selector is parsed.
// This mirrors scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L37
type topologySpreadConstraint struct {
maxSkew int32
topologyKey string
selector labels.Selector
nodeAffinityPolicy v1.NodeInclusionPolicy
nodeTaintsPolicy v1.NodeInclusionPolicy
podNodeAffinity nodeaffinity.RequiredNodeAffinity
podTolerations []v1.Toleration
}

// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
type RemovePodsViolatingTopologySpreadConstraint struct {
handle frameworktypes.Handle
@@ -132,20 +147,27 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
}

// ...where there is a topology constraint
namespaceTopologySpreadConstraints := []v1.TopologySpreadConstraint{}
var namespaceTopologySpreadConstraints []topologySpreadConstraint
for _, pod := range namespacedPods[namespace] {
for _, constraint := range pod.Spec.TopologySpreadConstraints {
// Ignore topology constraints if they are not included
if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
continue
}
// Need to check v1.TopologySpreadConstraint deepEquality because
// v1.TopologySpreadConstraint has pointer fields
// and we don't need to go over duplicated constraints later on
if hasIdenticalConstraints(constraint, namespaceTopologySpreadConstraints) {

namespaceTopologySpreadConstraint, err := newTopologySpreadConstraint(constraint, pod)
if err != nil {
klog.ErrorS(err, "cannot process topology spread constraint")
continue
}
namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, constraint)

// Need to check TopologySpreadConstraint deepEquality because
// TopologySpreadConstraint can have pointer fields
// and we don't need to go over duplicated constraints later on
if hasIdenticalConstraints(namespaceTopologySpreadConstraint, namespaceTopologySpreadConstraints) {
continue
}
namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, namespaceTopologySpreadConstraint)
}
}
if len(namespaceTopologySpreadConstraints) == 0 {
@@ -153,22 +175,18 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
}
|
||||
|
||||
// 2. for each topologySpreadConstraint in that namespace
|
||||
for _, constraint := range namespaceTopologySpreadConstraints {
|
||||
for _, tsc := range namespaceTopologySpreadConstraints {
|
||||
constraintTopologies := make(map[topologyPair][]*v1.Pod)
|
||||
// pre-populate the topologyPair map with all the topologies available from the nodeMap
|
||||
// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
|
||||
for _, node := range nodeMap {
|
||||
if val, ok := node.Labels[constraint.TopologyKey]; ok {
|
||||
constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
|
||||
if val, ok := node.Labels[tsc.topologyKey]; ok {
|
||||
if matchNodeInclusionPolicies(tsc, node) {
|
||||
constraintTopologies[topologyPair{key: tsc.topologyKey, value: val}] = make([]*v1.Pod, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't parse label selector as selector", "selector", constraint.LabelSelector)
|
||||
continue
|
||||
}
|
||||
|
||||
// 3. for each evictable pod in that namespace
|
||||
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
|
||||
var sumPods float64
|
||||
@@ -178,7 +196,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
continue
|
||||
}
|
||||
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
if !tsc.selector.Matches(labels.Set(pod.Labels)) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -188,21 +206,21 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
// If ok is false, node is nil in which case node.Labels will panic. In which case a pod is yet to be scheduled. So it's safe to just continue here.
|
||||
continue
|
||||
}
|
||||
nodeValue, ok := node.Labels[constraint.TopologyKey]
|
||||
nodeValue, ok := node.Labels[tsc.topologyKey]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// 6. create a topoPair with key as this TopologySpreadConstraint
|
||||
topoPair := topologyPair{key: constraint.TopologyKey, value: nodeValue}
|
||||
topoPair := topologyPair{key: tsc.topologyKey, value: nodeValue}
|
||||
// 7. add the pod with key as this topoPair
|
||||
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], pod)
|
||||
sumPods++
|
||||
}
|
||||
if topologyIsBalanced(constraintTopologies, constraint) {
|
||||
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
|
||||
if topologyIsBalanced(constraintTopologies, tsc) {
|
||||
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", tsc)
|
||||
continue
|
||||
}
|
||||
d.balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, nodes)
|
||||
d.balanceDomains(podsForEviction, tsc, constraintTopologies, sumPods, nodes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,7 +245,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
}
|
||||
|
||||
// hasIdenticalConstraints checks if we already had an identical TopologySpreadConstraint in namespaceTopologySpreadConstraints slice
|
||||
func hasIdenticalConstraints(newConstraint v1.TopologySpreadConstraint, namespaceTopologySpreadConstraints []v1.TopologySpreadConstraint) bool {
|
||||
func hasIdenticalConstraints(newConstraint topologySpreadConstraint, namespaceTopologySpreadConstraints []topologySpreadConstraint) bool {
|
||||
for _, constraint := range namespaceTopologySpreadConstraints {
|
||||
if reflect.DeepEqual(newConstraint, constraint) {
|
||||
return true
|
||||
@@ -238,7 +256,7 @@ func hasIdenticalConstraints(newConstraint v1.TopologySpreadConstraint, namespac
|
||||
|
||||
// topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
|
||||
// this is called before any sorting or other calculations and is used to skip topologies that don't need to be balanced
|
||||
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.TopologySpreadConstraint) bool {
|
||||
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, tsc topologySpreadConstraint) bool {
|
||||
minDomainSize := math.MaxInt32
|
||||
maxDomainSize := math.MinInt32
|
||||
for _, pods := range topology {
|
||||
@@ -248,7 +266,7 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
|
||||
if len(pods) > maxDomainSize {
|
||||
maxDomainSize = len(pods)
|
||||
}
|
||||
if int32(maxDomainSize-minDomainSize) > constraint.MaxSkew {
|
||||
if int32(maxDomainSize-minDomainSize) > tsc.maxSkew {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -277,7 +295,7 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
|
||||
// (assuming even distribution by the scheduler of the evicted pods)
|
||||
func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
|
||||
podsForEviction map[*v1.Pod]struct{},
|
||||
constraint v1.TopologySpreadConstraint,
|
||||
tsc topologySpreadConstraint,
|
||||
constraintTopologies map[topologyPair][]*v1.Pod,
|
||||
sumPods float64,
|
||||
nodes []*v1.Node,
|
||||
@@ -288,7 +306,8 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
|
||||
getPodsAssignedToNode := d.handle.GetPodsAssignedToNodeFunc()
|
||||
topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
|
||||
|
||||
nodesBelowIdealAvg := filterNodesBelowIdealAvg(nodes, sortedDomains, constraint.TopologyKey, idealAvg)
|
||||
eligibleNodes := filterEligibleNodes(nodes, tsc)
|
||||
nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, tsc.topologyKey, idealAvg)
|
||||
|
||||
// i is the index for belowOrEqualAvg
|
||||
// j is the index for aboveAvg
|
||||
@@ -304,7 +323,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
|
||||
skew := float64(len(sortedDomains[j].pods) - len(sortedDomains[i].pods))
|
||||
|
||||
// if k and j are within the maxSkew of each other, move to next belowOrEqualAvg
|
||||
if int32(skew) <= constraint.MaxSkew {
|
||||
if int32(skew) <= tsc.maxSkew {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
@@ -318,7 +337,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
|
||||
aboveAvg := math.Ceil(float64(len(sortedDomains[j].pods)) - idealAvg)
|
||||
belowAvg := math.Ceil(idealAvg - float64(len(sortedDomains[i].pods)))
|
||||
smallestDiff := math.Min(aboveAvg, belowAvg)
|
||||
halfSkew := math.Ceil((skew - float64(constraint.MaxSkew)) / 2)
|
||||
halfSkew := math.Ceil((skew - float64(tsc.maxSkew)) / 2)
|
||||
movePods := int(math.Min(smallestDiff, halfSkew))
|
||||
if movePods <= 0 {
|
||||
i++
|
||||
@@ -435,3 +454,92 @@ func comparePodsByPriority(iPod, jPod *v1.Pod) bool {
return true
}
}

// doNotScheduleTaintsFilterFunc returns the filter function that can
// filter out the node taints that reject scheduling Pod on a Node.
func doNotScheduleTaintsFilterFunc() func(t *v1.Taint) bool {
return func(t *v1.Taint) bool {
// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
}
}

func filterEligibleNodes(nodes []*v1.Node, tsc topologySpreadConstraint) []*v1.Node {
var eligibleNodes []*v1.Node
for _, node := range nodes {
if matchNodeInclusionPolicies(tsc, node) {
eligibleNodes = append(eligibleNodes, node)
}
}
return eligibleNodes
}

func matchNodeInclusionPolicies(tsc topologySpreadConstraint, node *v1.Node) bool {
if tsc.nodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
// We ignore parsing errors here for backwards compatibility.
if match, _ := tsc.podNodeAffinity.Match(node); !match {
return false
}
}

if tsc.nodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, tsc.podTolerations, doNotScheduleTaintsFilterFunc()); untolerated {
return false
}
}
return true
}
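With `nodeTaintsPolicy: Honor`, nodes whose taints the pod does not tolerate are excluded from the set of domains considered for balancing. A minimal, hypothetical sketch of that filtering using the same component-helpers call (standalone example, not the plugin's actual wiring):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	v1helper "k8s.io/component-helpers/scheduling/corev1"
)

func main() {
	taints := []v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}}
	tolerations := []v1.Toleration{} // the pod tolerates nothing

	// Only NoSchedule/NoExecute taints matter for scheduling, mirroring
	// doNotScheduleTaintsFilterFunc above.
	filter := func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
	}

	_, untolerated := v1helper.FindMatchingUntoleratedTaint(taints, tolerations, filter)
	// untolerated == true here, so with nodeTaintsPolicy: Honor this node
	// would be skipped when pre-populating topology domains.
	fmt.Println(untolerated)
}
```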

// inspired by Scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L90
func newTopologySpreadConstraint(constraint v1.TopologySpreadConstraint, pod *v1.Pod) (topologySpreadConstraint, error) {
selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
if err != nil {
return topologySpreadConstraint{}, err
}

if len(constraint.MatchLabelKeys) > 0 && pod.Labels != nil {
matchLabels := make(labels.Set)
for _, labelKey := range constraint.MatchLabelKeys {
if value, ok := pod.Labels[labelKey]; ok {
matchLabels[labelKey] = value
}
}
if len(matchLabels) > 0 {
selector = mergeLabelSetWithSelector(matchLabels, selector)
}
}

tsc := topologySpreadConstraint{
maxSkew: constraint.MaxSkew,
topologyKey: constraint.TopologyKey,
selector: selector,
nodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
nodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
podNodeAffinity: nodeaffinity.GetRequiredNodeAffinity(pod),
podTolerations: pod.Spec.Tolerations,
}
if constraint.NodeAffinityPolicy != nil {
tsc.nodeAffinityPolicy = *constraint.NodeAffinityPolicy
}
if constraint.NodeTaintsPolicy != nil {
tsc.nodeTaintsPolicy = *constraint.NodeTaintsPolicy
}

return tsc, nil
}

// Scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L136
func mergeLabelSetWithSelector(matchLabels labels.Set, s labels.Selector) labels.Selector {
mergedSelector := labels.SelectorFromSet(matchLabels)

requirements, ok := s.Requirements()
if !ok {
return s
}

for _, r := range requirements {
mergedSelector = mergedSelector.Add(r)
}

return mergedSelector
}
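The `matchLabelKeys` handling means pods are only compared within the same owner when the constraint lists a key such as `pod-template-hash`. A small sketch of the merged selector's behaviour using `k8s.io/apimachinery/pkg/labels` (labels and values are made up for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Selector from the constraint's labelSelector: foo=bar.
	base, _ := labels.Parse("foo=bar")

	// Labels copied from the pod for the matchLabelKeys entries,
	// e.g. pod-template-hash identifying one ReplicaSet.
	matchLabels := labels.Set{"pod-template-hash": "abc123"}

	// Merge as mergeLabelSetWithSelector does: start from the label set
	// and add every requirement of the original selector.
	merged := labels.SelectorFromSet(matchLabels)
	if reqs, ok := base.Requirements(); ok {
		for _, r := range reqs {
			merged = merged.Add(r)
		}
	}

	fmt.Println(merged.Matches(labels.Set{"foo": "bar", "pod-template-hash": "abc123"})) // true
	fmt.Println(merged.Matches(labels.Set{"foo": "bar", "pod-template-hash": "zzz999"})) // false
}
```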
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -303,12 +304,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
|
||||
@@ -316,9 +319,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
@@ -326,6 +330,163 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,3,3,2], maxSkew=1, No pod is evicted since topology is balanced",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 3,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "7 domains, sizes [0,0,0,3,3,2], maxSkew=1, two pods are evicted and moved to new node added",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n7", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeD"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 3,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 2,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 1 pod is evicted from node 4",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 4,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 0 pod is evicted as pods of node:winNodeA can only be scheduled on this node and nodeFit is true ",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 4,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"node": "winNodeA"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
|
||||
nodes: []*v1.Node{
|
||||
@@ -903,6 +1064,55 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pods given matchLabelKeys on same replicaset",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
expectedEvictedPods: []string{"pod-1"},
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods given matchLabelKeys on two different replicasets",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "bar"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
|
||||
nodes: []*v1.Node{
|
||||
@@ -1371,31 +1581,53 @@ func getDefaultTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckIdenticalConstraints(t *testing.T) {
|
||||
newConstraintSame := v1.TopologySpreadConstraint{
|
||||
MaxSkew: 2,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
}
|
||||
newConstraintDifferent := v1.TopologySpreadConstraint{
|
||||
MaxSkew: 3,
|
||||
TopologyKey: "node",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
}
|
||||
namespaceTopologySpreadConstraint := []v1.TopologySpreadConstraint{
|
||||
func getDefaultNodeTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 2,
|
||||
TopologyKey: "zone",
|
||||
MaxSkew: maxSkew,
|
||||
TopologyKey: "node",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getDefaultTopologyConstraintsWithPodTemplateHashMatch(maxSkew int32) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: maxSkew,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MatchLabelKeys: []string{appsv1.DefaultDeploymentUniqueLabelKey},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckIdenticalConstraints(t *testing.T) {
|
||||
selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}})
|
||||
|
||||
newConstraintSame := topologySpreadConstraint{
|
||||
maxSkew: 2,
|
||||
topologyKey: "zone",
|
||||
selector: selector.DeepCopySelector(),
|
||||
}
|
||||
newConstraintDifferent := topologySpreadConstraint{
|
||||
maxSkew: 3,
|
||||
topologyKey: "node",
|
||||
selector: selector.DeepCopySelector(),
|
||||
}
|
||||
namespaceTopologySpreadConstraint := []topologySpreadConstraint{
|
||||
{
|
||||
maxSkew: 2,
|
||||
topologyKey: "zone",
|
||||
selector: selector.DeepCopySelector(),
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
namespaceTopologySpreadConstraints []v1.TopologySpreadConstraint
|
||||
newConstraint v1.TopologySpreadConstraint
|
||||
namespaceTopologySpreadConstraints []topologySpreadConstraint
|
||||
newConstraint topologySpreadConstraint
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var oneHourPodLifetimeSeconds uint = 3600
|
||||
@@ -131,7 +132,7 @@ func TestFailedPods(t *testing.T) {
|
||||
}
|
||||
|
||||
func initFailedJob(name, namespace string) *batchv1.Job {
|
||||
podSpec := MakePodSpec("", nil)
|
||||
podSpec := test.MakePodSpec("", nil)
|
||||
podSpec.Containers[0].Command = []string{"/bin/false"}
|
||||
podSpec.RestartPolicy = v1.RestartPolicyNever
|
||||
labelsSet := labels.Set{"test": name, "name": name}
|
||||
|
||||
@@ -55,47 +55,9 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
|
||||
return v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
RunAsNonRoot: utilpointer.Bool(true),
|
||||
RunAsUser: utilpointer.Int64(1000),
|
||||
RunAsGroup: utilpointer.Int64(1000),
|
||||
SeccompProfile: &v1.SeccompProfile{
|
||||
Type: v1.SeccompProfileTypeRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Never",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("200Mi"),
|
||||
},
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: utilpointer.Bool(false),
|
||||
Capabilities: &v1.Capabilities{
|
||||
Drop: []v1.Capability{
|
||||
"ALL",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
PriorityClassName: priorityClassName,
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
}
|
||||
}
|
||||
|
||||
// RcByNameContainer returns a ReplicationController with specified name and container
|
||||
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
@@ -121,7 +83,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: MakePodSpec(priorityClassName, gracePeriod),
|
||||
Spec: test.MakePodSpec(priorityClassName, gracePeriod),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -2,18 +2,19 @@ package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const zoneTopologyKey string = "topology.kubernetes.io/zone"
|
||||
@@ -35,49 +36,107 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
testCases := map[string]struct {
|
||||
replicaCount int
|
||||
maxSkew int
|
||||
labelKey string
|
||||
labelValue string
|
||||
constraint v1.UnsatisfiableConstraintAction
|
||||
expectedEvictedCount uint
|
||||
replicaCount int
|
||||
topologySpreadConstraint v1.TopologySpreadConstraint
|
||||
}{
|
||||
"test-rc-topology-spread-hard-constraint": {
|
||||
replicaCount: 4,
|
||||
maxSkew: 1,
|
||||
labelKey: "test",
|
||||
labelValue: "topology-spread-hard-constraint",
|
||||
constraint: v1.DoNotSchedule,
|
||||
"test-topology-spread-hard-constraint": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "topology-spread-hard-constraint",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-rc-topology-spread-soft-constraint": {
|
||||
replicaCount: 4,
|
||||
maxSkew: 1,
|
||||
labelKey: "test",
|
||||
labelValue: "topology-spread-soft-constraint",
|
||||
constraint: v1.ScheduleAnyway,
|
||||
"test-topology-spread-soft-constraint": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "topology-spread-soft-constraint",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
},
|
||||
"test-node-taints-policy-honor": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "node-taints-policy-honor",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
NodeTaintsPolicy: nodeInclusionPolicyRef(v1.NodeInclusionPolicyHonor),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-node-affinity-policy-ignore": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "node-taints-policy-honor",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
NodeAffinityPolicy: nodeInclusionPolicyRef(v1.NodeInclusionPolicyIgnore),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-match-label-keys": {
|
||||
expectedEvictedCount: 0,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "match-label-keys",
|
||||
},
|
||||
},
|
||||
MatchLabelKeys: []string{appsv1.DefaultDeploymentUniqueLabelKey},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
|
||||
rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
|
||||
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating RC %s %v", name, err)
|
||||
t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
|
||||
deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
|
||||
d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
|
||||
})
|
||||
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating Deployment %s %v", name, err)
|
||||
}
|
||||
defer deleteRC(ctx, t, clientSet, rc)
|
||||
waitForRCPodsRunning(ctx, t, clientSet, rc)
|
||||
defer test.DeleteDeployment(ctx, t, clientSet, deployment)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
|
||||
|
||||
// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
|
||||
violatorRcName := name + "-violator"
|
||||
violatorCount := tc.maxSkew + 1
|
||||
violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, int32(violatorCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
|
||||
violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
|
||||
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
|
||||
// Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
|
||||
violatorDeploymentName := name + "-violator"
|
||||
violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
|
||||
violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
|
||||
d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
|
||||
})
|
||||
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
|
||||
}
|
||||
defer deleteRC(ctx, t, clientSet, violatorRc)
|
||||
waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
|
||||
defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
|
||||
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
|
||||
|
||||
@@ -103,7 +162,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
plugin, err := removepodsviolatingtopologyspreadconstraint.New(&removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{tc.constraint},
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{tc.topologySpreadConstraint.WhenUnsatisfiable},
|
||||
},
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: clientSet,
|
||||
@@ -120,18 +179,23 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
|
||||
|
||||
t.Logf("Wait for terminating pods of %s to disappear", name)
|
||||
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
|
||||
waitForTerminatingPodsToDisappear(ctx, t, clientSet, deployment.Namespace)
|
||||
|
||||
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 {
|
||||
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted == tc.expectedEvictedCount {
|
||||
t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
|
||||
} else {
|
||||
t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
|
||||
t.Fatalf("Expected %d evictions but got %d for %s TopologySpreadConstraint", tc.expectedEvictedCount, totalEvicted, name)
|
||||
}
|
||||
|
||||
if tc.expectedEvictedCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
|
||||
waitForRCPodsRunning(ctx, t, clientSet, rc)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
|
||||
|
||||
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
|
||||
listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
|
||||
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods for %s: %v", name, err)
|
||||
}
|
||||
@@ -146,26 +210,15 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
min, max := getMinAndMaxPodDistribution(nodePodCountMap)
|
||||
if max-min > tc.maxSkew {
|
||||
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.maxSkew, max-min)
|
||||
if max-min > int(tc.topologySpreadConstraint.MaxSkew) {
|
||||
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.topologySpreadConstraint.MaxSkew, max-min)
|
||||
}
|
||||
|
||||
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.maxSkew)
|
||||
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.topologySpreadConstraint.MaxSkew)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func makeTopologySpreadConstraints(maxSkew int, labelKey, labelValue string, constraint v1.UnsatisfiableConstraintAction) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: int32(maxSkew),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: constraint,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{labelKey: labelValue}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
|
||||
min := math.MaxInt32
|
||||
max := math.MinInt32
|
||||
@@ -180,3 +233,7 @@ func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
|
||||
|
||||
return min, max
|
||||
}
|
||||
|
||||
func nodeInclusionPolicyRef(policy v1.NodeInclusionPolicy) *v1.NodeInclusionPolicy {
|
||||
return &policy
|
||||
}
|
||||
|
||||
@@ -17,13 +17,58 @@ limitations under the License.
package test

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	utilpointer "k8s.io/utils/pointer"
)

func BuildTestDeployment(name, namespace string, replicas int32, labels map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
	// Add "name": name to the labels, overwriting if it exists.
	labels["name"] = name

	deployment := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind: "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: utilpointer.Int32(replicas),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"name": name,
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: MakePodSpec("", utilpointer.Int64(0)),
			},
		},
	}

	if apply != nil {
		apply(deployment)
	}

	return deployment
}

// BuildTestPod creates a test pod with given parameters.
func BuildTestPod(name string, cpu, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
	pod := &v1.Pod{
@@ -124,6 +169,45 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
	return node
}

func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
	return v1.PodSpec{
		SecurityContext: &v1.PodSecurityContext{
			RunAsNonRoot: utilpointer.Bool(true),
			RunAsUser: utilpointer.Int64(1000),
			RunAsGroup: utilpointer.Int64(1000),
			SeccompProfile: &v1.SeccompProfile{
				Type: v1.SeccompProfileTypeRuntimeDefault,
			},
		},
		Containers: []v1.Container{{
			Name: "pause",
			ImagePullPolicy: "Never",
			Image: "registry.k8s.io/pause",
			Ports: []v1.ContainerPort{{ContainerPort: 80}},
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("200Mi"),
				},
				Requests: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			},
			SecurityContext: &v1.SecurityContext{
				AllowPrivilegeEscalation: utilpointer.Bool(false),
				Capabilities: &v1.Capabilities{
					Drop: []v1.Capability{
						"ALL",
					},
				},
			},
		}},
		PriorityClassName: priorityClassName,
		TerminationGracePeriodSeconds: gracePeriod,
	}
}

// MakeBestEffortPod makes the given pod a BestEffort pod
func MakeBestEffortPod(pod *v1.Pod) {
	pod.Spec.Containers[0].Resources.Requests = nil
@@ -179,8 +263,77 @@ func SetPodExtendedResourceRequest(pod *v1.Pod, resourceName v1.ResourceName, re
	pod.Spec.Containers[0].Resources.Requests[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
}

// SetNodeExtendedResouces sets the given node's extended resources
// SetNodeExtendedResource sets the given node's extended resources
func SetNodeExtendedResource(node *v1.Node, resourceName v1.ResourceName, requestQuantity int64) {
	node.Status.Capacity[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
	node.Status.Allocatable[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
}

func DeleteDeployment(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
	// set number of replicas to 0
	deploymentCopy := deployment.DeepCopy()
	deploymentCopy.Spec.Replicas = utilpointer.Int32(0)
	if _, err := clientSet.AppsV1().Deployments(deploymentCopy.Namespace).Update(ctx, deploymentCopy, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Error updating replica controller %v", err)
	}

	// wait 30 seconds until all pods are deleted
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
		scale, err := clientSet.AppsV1().Deployments(deployment.Namespace).GetScale(c, deployment.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return scale.Spec.Replicas == 0, nil
	}); err != nil {
		t.Fatalf("Error deleting Deployment pods %v", err)
	}

	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
		podList, _ := clientSet.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.Labels).String()})
		t.Logf("Waiting for %v Deployment pods to disappear, still %v remaining", deployment.Name, len(podList.Items))
		if len(podList.Items) > 0 {
			return false, nil
		}
		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for Deployment pods to disappear: %v", err)
	}

	if err := clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Error deleting Deployment %v", err)
	}

	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
		_, err := clientSet.AppsV1().Deployments(deployment.Namespace).Get(ctx, deployment.Name, metav1.GetOptions{})
		if err != nil && strings.Contains(err.Error(), "not found") {
			return true, nil
		}
		return false, nil
	}); err != nil {
		t.Fatalf("Error deleting Deployment %v", err)
	}
}

func WaitForDeploymentPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
		podList, err := clientSet.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{
			LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.ObjectMeta.Labels).String(),
		})
		if err != nil {
			return false, err
		}
		if len(podList.Items) != int(*deployment.Spec.Replicas) {
			t.Logf("Waiting for %v pods to be created, got %v instead", *deployment.Spec.Replicas, len(podList.Items))
			return false, nil
		}
		for _, pod := range podList.Items {
			if pod.Status.Phase != v1.PodRunning {
				t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		t.Fatalf("Error waiting for pods running: %v", err)
	}
}
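Taken together, the new helpers give e2e tests a create/wait/tear-down flow for Deployments. A hedged sketch of how they are meant to be combined; the test name, namespace, and the getClientSet helper are illustrative assumptions, not part of this diff:

	func TestEvictDeploymentPods(t *testing.T) {
		ctx := context.Background()
		clientSet := getClientSet(t) // assumed helper returning a clientset.Interface

		deployment := test.BuildTestDeployment("demo", "e2e-demo", 3, map[string]string{"app": "demo"}, nil)
		if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
			t.Fatalf("Error creating deployment: %v", err)
		}
		defer test.DeleteDeployment(ctx, t, clientSet, deployment)

		test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
		// ... run the descheduler and assert on evictions ...
	}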
6 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go generated vendored
@@ -9,6 +9,8 @@ func Render(doc []byte) []byte {
	renderer := NewRoffRenderer()

	return blackfriday.Run(doc,
		[]blackfriday.Option{blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions())}...)
		[]blackfriday.Option{
			blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions()),
		}...)
}

30 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go generated vendored
@@ -1,6 +1,7 @@
package md2man

import (
	"bytes"
	"fmt"
	"io"
	"os"
@@ -34,10 +35,10 @@ const (
	hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
	linkTag = "\n\\[la]"
	linkCloseTag = "\\[ra]"
	codespanTag = "\\fB\\fC"
	codespanTag = "\\fB"
	codespanCloseTag = "\\fR"
	codeTag = "\n.PP\n.RS\n\n.nf\n"
	codeCloseTag = "\n.fi\n.RE\n"
	codeTag = "\n.EX\n"
	codeCloseTag = "\n.EE\n"
	quoteTag = "\n.PP\n.RS\n"
	quoteCloseTag = "\n.RE\n"
	listTag = "\n.RS\n"
@@ -86,8 +87,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {

	var walkAction = blackfriday.GoToNext
	walkAction := blackfriday.GoToNext

	switch node.Type {
	case blackfriday.Text:
@@ -109,9 +109,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
			out(w, strongCloseTag)
		}
	case blackfriday.Link:
		if !entering {
			out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
			// Don't render the link text for automatic links, because this
			// will only duplicate the URL in the roff output.
			// See https://daringfireball.net/projects/markdown/syntax#autolink
			if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
				out(w, string(node.FirstChild.Literal))
			}
			// Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
			escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
			out(w, linkTag+escapedLink+linkCloseTag)
			walkAction = blackfriday.SkipChildren
	case blackfriday.Image:
		// ignore images
		walkAction = blackfriday.SkipChildren
@@ -160,6 +167,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
		r.handleTableCell(w, node, entering)
	case blackfriday.HTMLSpan:
		// ignore other HTML tags
	case blackfriday.HTMLBlock:
		if bytes.HasPrefix(node.Literal, []byte("<!--")) {
			break // ignore comments, no warning
		}
		fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
	default:
		fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
	}
@@ -254,7 +266,7 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
		start = "\t"
	}
	if node.IsHeader {
		start += codespanTag
		start += strongTag
	} else if nodeLiteralSize(node) > 30 {
		start += tableCellStart
	}
@@ -262,7 +274,7 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
	} else {
		var end string
		if node.IsHeader {
			end = codespanCloseTag
			end = strongCloseTag
		} else if nodeLiteralSize(node) > 30 {
			end = tableCellEnd
		}
113 vendor/github.com/go-logr/logr/README.md generated vendored
@@ -1,6 +1,7 @@
# A minimal logging API for Go

[](https://pkg.go.dev/github.com/go-logr/logr)
[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)

logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@@ -73,6 +74,29 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.

When the Go developers started developing such an interface with
[slog](https://github.com/golang/go/issues/56345), they adopted some of the
logr design but also left out some parts and changed others:

| Feature | logr | slog |
|---------|------|------|
| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
| Low-level API | `LogSink` | `Handler` |
| Stack unwinding | done by `LogSink` | done by `Logger` |
| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
| Passing logger via context | `NewContext`, `FromContext` | no API |
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |

The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.

### Inspiration

Before you consider this package, please read [this blog post by the
@@ -118,6 +142,91 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)

## slog interoperability

Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.

## Using a `logr.Sink` as backend for slog

Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).

If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.

Such an implementation should also support values that implement specific
interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
`slog.GroupValue`). logr does not convert those.

Not supporting slog has several drawbacks:
- Recording source code locations works correctly if the handler gets called
  through `slog.Logger`, but may be wrong in other cases. That's because a
  `logr.Sink` does its own stack unwinding instead of using the program counter
  provided by the high-level API.
- slog levels <= 0 can be mapped to logr levels by negating the level without a
  loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
  used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
  because logr does not support "more important than info" levels.
- The slog group concept is supported by prefixing each key in a key/value
  pair with the group names, separated by a dot. For structured output like
  JSON it would be better to group the key/value pairs inside an object.
- Special slog values and interfaces don't work as expected.
- The overhead is likely to be higher.

These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.

## Using a `slog.Handler` as backend for logr

Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
  by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
  counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
  attribute with `logger` as key and the names separated by slash as value.
- `Logger.Error` is turned into a log record with `slog.LevelError` as level
  and an additional attribute with `err` as key, if an error was provided.

The main drawback is that `logr.Marshaler` will not be supported. Types should
ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.

## Context support for slog

Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:

	func HandlerFromContext(ctx context.Context) slog.Handler {
		logger, err := logr.FromContext(ctx)
		if err == nil {
			return slogr.NewSlogHandler(logger)
		}
		return slog.Default().Handler()
	}

	func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
		return logr.NewContext(ctx, slogr.NewLogr(handler))
	}

The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.

## FAQ

### Conceptual
@@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this",

Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)
info-type logs). For reference, slog pre-defines -4 for debug logs
(corresponds to 4 in logr), which matches what is
[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).

#### How do I choose my keys?
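As a quick illustration of the interoperability described in the vendored README above, a small self-contained sketch (not part of the diff) showing both conversion directions through the slogr package:

	package main

	import (
		"log/slog"
		"os"

		"github.com/go-logr/logr/slogr"
	)

	func main() {
		// slog.Handler -> logr.Logger
		logger := slogr.NewLogr(slog.NewJSONHandler(os.Stdout, nil))
		logger.Info("via logr", "key", "value")

		// logr.Logger -> slog.Handler, wrapped back into the high-level slog API
		slogger := slog.New(slogr.NewSlogHandler(logger))
		slogger.Info("via slog", "key", "value")
	}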
18 vendor/github.com/go-logr/logr/SECURITY.md generated vendored Normal file
@@ -0,0 +1,18 @@
# Security Policy

If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.

You may submit the report in the following ways:

- send an email to go-logr-security@googlegroups.com
- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)

Please provide the following information in your report:

- A description of the vulnerability and its impact
- How to reproduce the issue

We ask that you give us 90 days to work on a fix before public exposure.

48 vendor/github.com/go-logr/logr/funcr/funcr.go generated vendored
@@ -116,17 +116,17 @@ type Options struct {
	// Equivalent hooks are offered for key-value pairs saved via
	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
	// for user-provided pairs (see RenderArgsHook).
	RenderBuiltinsHook func(kvList []interface{}) []interface{}
	RenderBuiltinsHook func(kvList []any) []any

	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
	// only called for key-value pairs saved via logr.Logger.WithValues. See
	// RenderBuiltinsHook for more details.
	RenderValuesHook func(kvList []interface{}) []interface{}
	RenderValuesHook func(kvList []any) []any

	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
	// called for key-value pairs passed directly to Info and Error. See
	// RenderBuiltinsHook for more details.
	RenderArgsHook func(kvList []interface{}) []interface{}
	RenderArgsHook func(kvList []any) []any

	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
	// that contains a struct, etc.) it may log. Every time it finds a struct,
@@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink {
	return &l
}

func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}
@@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
	return &l
}

func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
func (l fnlogger) Info(level int, msg string, kvList ...any) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	l.write(prefix, args)
}

func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
func (l fnlogger) Error(err error, msg string, kvList ...any) {
	prefix, args := l.FormatError(err, msg, kvList)
	l.write(prefix, args)
}
@@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
type Formatter struct {
	outputFormat outputFormat
	prefix string
	values []interface{}
	values []any
	valuesStr string
	depth int
	opts *Options
@@ -246,10 +246,10 @@ const (
)

// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
type PseudoStruct []any

// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
func (f Formatter) render(builtins, args []any) string {
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if f.outputFormat == outputJSON {
@@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string {
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
	// This logic overlaps with sanitize() but saves one type-cast per key,
	// which can be measurable.
	if len(kvList)%2 != 0 {
@@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
	return kvList
}

func (f Formatter) pretty(value interface{}) string {
func (f Formatter) pretty(value any) string {
	return f.prettyWithFlags(value, 0, 0)
}

@@ -343,7 +343,7 @@ const (
)

// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
	if depth > f.opts.MaxLogDepth {
		return `"<max-log-depth-exceeded>"`
	}
@@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool {
	return false
}

func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
func invokeMarshaler(m logr.Marshaler) (ret any) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
@@ -675,12 +675,12 @@ func (f Formatter) caller() Caller {

const noValue = "<no-value>"

func (f Formatter) nonStringKey(v interface{}) string {
func (f Formatter) nonStringKey(v any) string {
	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}

// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
func (f Formatter) snippet(v any) string {
	const snipLen = 16

	snip := f.pretty(v)
@@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
func (f Formatter) sanitize(kvList []any) []any {
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
@@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int {
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
	args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
@@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
}

// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
	args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
@@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
		args = append(args, "caller", f.caller())
	}
	args = append(args, "msg", msg)
	var loggableErr interface{}
	var loggableErr any
	if err != nil {
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	return f.prefix, f.render(args, kvList)
	return prefix, f.render(args, kvList)
}

// AddName appends the specified name. funcr uses '/' characters to separate
@@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) {

// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
func (f *Formatter) AddValues(kvList []any) {
	// Three slice args forces a copy.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

35 vendor/github.com/go-logr/logr/logr.go generated vendored
@@ -127,9 +127,9 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// The zero logger (= Logger{}) is identical to Discard() and discards all log
// entries. Code that receives a Logger by value can simply call it, the methods
// will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// # Key Naming Conventions
@@ -258,6 +258,12 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
	// Some implementations of LogSink look at the caller in Enabled (e.g.
	// different verbosity levels per package or file), but we only pass one
	// CallDepth in (via Init). This means that all calls from Logger to the
	// LogSink's Enabled, Info, and Error methods must have the same number of
	// frames. In other words, Logger methods can't call other Logger methods
	// which call these LogSink methods unless we do it the same in all paths.
	return l.sink != nil && l.sink.Enabled(l.level)
}

@@ -267,11 +273,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
func (l Logger) Info(msg string, keysAndValues ...any) {
	if l.sink == nil {
		return
	}
	if l.Enabled() {
	if l.sink.Enabled(l.level) { // see comment in Enabled
		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
			withHelper.GetCallStackHelper()()
		}
@@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
func (l Logger) Error(err error, msg string, keysAndValues ...any) {
	if l.sink == nil {
		return
	}
@@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger {
	return l
}

// GetV returns the verbosity level of the logger. If the logger's LogSink is
// nil as in the Discard logger, this will always return 0.
func (l Logger) GetV() int {
	// 0 if l.sink nil because of the if check in V above.
	return l.level
}

// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
func (l Logger) WithValues(keysAndValues ...any) Logger {
	if l.sink == nil {
		return l
	}
@@ -467,15 +480,15 @@ type LogSink interface {
	// The level argument is provided for optional logging. This method will
	// only be called when Enabled(level) is true. See Logger.Info for more
	// details.
	Info(level int, msg string, keysAndValues ...interface{})
	Info(level int, msg string, keysAndValues ...any)

	// Error logs an error, with the given message and key/value pairs as
	// context. See Logger.Error for more details.
	Error(err error, msg string, keysAndValues ...interface{})
	Error(err error, msg string, keysAndValues ...any)

	// WithValues returns a new LogSink with additional key/value pairs. See
	// Logger.WithValues for more details.
	WithValues(keysAndValues ...interface{}) LogSink
	WithValues(keysAndValues ...any) LogSink

	// WithName returns a new LogSink with the specified name appended. See
	// Logger.WithName for more details.
@@ -546,5 +559,5 @@ type Marshaler interface {
	// with exported fields
	//
	// It may return any value of any type.
	MarshalLog() interface{}
	MarshalLog() any
}
168 vendor/github.com/go-logr/logr/slogr/sloghandler.go generated vendored Normal file
@@ -0,0 +1,168 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package slogr

import (
	"context"
	"log/slog"

	"github.com/go-logr/logr"
)

type slogHandler struct {
	// May be nil, in which case all logs get discarded.
	sink logr.LogSink
	// Non-nil if sink is non-nil and implements SlogSink.
	slogSink SlogSink

	// groupPrefix collects values from WithGroup calls. It gets added as
	// prefix to value keys when handling a log record.
	groupPrefix string

	// levelBias can be set when constructing the handler to influence the
	// slog.Level of log records. A positive levelBias reduces the
	// slog.Level value. slog has no API to influence this value after the
	// handler got created, so it can only be set indirectly through
	// Logger.V.
	levelBias slog.Level
}

var _ slog.Handler = &slogHandler{}

// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."

// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
	return l.levelBias
}

func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool {
	return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}

func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
	if l.slogSink != nil {
		// Only adjust verbosity level of log entries < slog.LevelError.
		if record.Level < slog.LevelError {
			record.Level -= l.levelBias
		}
		return l.slogSink.Handle(ctx, record)
	}

	// No need to check for nil sink here because Handle will only be called
	// when Enabled returned true.

	kvList := make([]any, 0, 2*record.NumAttrs())
	record.Attrs(func(attr slog.Attr) bool {
		if attr.Key != "" {
			kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
		}
		return true
	})
	if record.Level >= slog.LevelError {
		l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
	} else {
		level := l.levelFromSlog(record.Level)
		l.sinkWithCallDepth().Info(level, record.Message, kvList...)
	}
	return nil
}

// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(NewSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because NewLogr needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() logr.LogSink {
	if sink, ok := l.sink.(logr.CallDepthLogSink); ok {
		return sink.WithCallDepth(2)
	}
	return l.sink
}

func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	if l.sink == nil || len(attrs) == 0 {
		return l
	}

	copy := *l
	if l.slogSink != nil {
		copy.slogSink = l.slogSink.WithAttrs(attrs)
		copy.sink = copy.slogSink
	} else {
		kvList := make([]any, 0, 2*len(attrs))
		for _, attr := range attrs {
			if attr.Key != "" {
				kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
			}
		}
		copy.sink = l.sink.WithValues(kvList...)
	}
	return &copy
}

func (l *slogHandler) WithGroup(name string) slog.Handler {
	if l.sink == nil {
		return l
	}
	copy := *l
	if l.slogSink != nil {
		copy.slogSink = l.slogSink.WithGroup(name)
		copy.sink = l.slogSink
	} else {
		copy.groupPrefix = copy.addGroupPrefix(name)
	}
	return &copy
}

func (l *slogHandler) addGroupPrefix(name string) string {
	if l.groupPrefix == "" {
		return name
	}
	return l.groupPrefix + groupSeparator + name
}

// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a logr.LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
//	logrV0 := getMyLogger()
//	logrV2 := logrV0.V(2)
//	slogV2 := slog.New(slogr.NewSlogHandler(logrV2))
//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
//	slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
//	slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
	result := -level
	result += l.levelBias // in case the original logr.Logger had a V level
	if result < 0 {
		result = 0 // because logr.LogSink doesn't expect negative V levels
	}
	return int(result)
}

108 vendor/github.com/go-logr/logr/slogr/slogr.go generated vendored Normal file
@@ -0,0 +1,108 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
// APIs.
//
// See the README in the top-level [./logr] package for a discussion of
// interoperability.
package slogr

import (
	"context"
	"log/slog"

	"github.com/go-logr/logr"
)

// NewLogr returns a logr.Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func NewLogr(handler slog.Handler) logr.Logger {
	if handler, ok := handler.(*slogHandler); ok {
		if handler.sink == nil {
			return logr.Discard()
		}
		return logr.New(handler.sink).V(int(handler.levelBias))
	}
	return logr.New(&slogSink{handler: handler})
}

// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// The returned logger writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the logr.Logger:
//
//	logger := <some logr.Logger with 0 as verbosity level>
//	slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the logr.Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handled negative levels:
//
//	slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
//	slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
//	slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
//	slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func NewSlogHandler(logger logr.Logger) slog.Handler {
	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
		return sink.handler
	}

	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
	if slogSink, ok := handler.sink.(SlogSink); ok {
		handler.slogSink = slogSink
	}
	return handler
}

// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
//     as intended by slog
//   - proper grouping of key/value pairs via WithGroup
//   - verbosity levels > slog.LevelInfo can be recorded
//   - less overhead
//
// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in NewLogr
// and NewSlogHandler.
type SlogSink interface {
	logr.LogSink

	Handle(ctx context.Context, record slog.Record) error
	WithAttrs(attrs []slog.Attr) SlogSink
	WithGroup(name string) SlogSink
}

122 vendor/github.com/go-logr/logr/slogr/slogsink.go generated vendored Normal file
@@ -0,0 +1,122 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package slogr

import (
	"context"
	"log/slog"
	"runtime"
	"time"

	"github.com/go-logr/logr"
)

var (
	_ logr.LogSink = &slogSink{}
	_ logr.CallDepthLogSink = &slogSink{}
	_ Underlier = &slogSink{}
)

// Underlier is implemented by the LogSink returned by NewLogr.
type Underlier interface {
	// GetUnderlying returns the Handler used by the LogSink.
	GetUnderlying() slog.Handler
}

const (
	// nameKey is used to log the `WithName` values as an additional attribute.
	nameKey = "logger"

	// errKey is used to log the error parameter of Error as an additional attribute.
	errKey = "err"
)

type slogSink struct {
	callDepth int
	name string
	handler slog.Handler
}

func (l *slogSink) Init(info logr.RuntimeInfo) {
	l.callDepth = info.CallDepth
}

func (l *slogSink) GetUnderlying() slog.Handler {
	return l.handler
}

func (l *slogSink) WithCallDepth(depth int) logr.LogSink {
	newLogger := *l
	newLogger.callDepth += depth
	return &newLogger
}

func (l *slogSink) Enabled(level int) bool {
	return l.handler.Enabled(context.Background(), slog.Level(-level))
}

func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
	l.log(nil, msg, slog.Level(-level), kvList...)
}

func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
	l.log(err, msg, slog.LevelError, kvList...)
}

func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
	var pcs [1]uintptr
	// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
	runtime.Callers(3+l.callDepth, pcs[:])

	record := slog.NewRecord(time.Now(), level, msg, pcs[0])
	if l.name != "" {
		record.AddAttrs(slog.String(nameKey, l.name))
	}
	if err != nil {
		record.AddAttrs(slog.Any(errKey, err))
	}
	record.Add(kvList...)
	l.handler.Handle(context.Background(), record)
}

func (l slogSink) WithName(name string) logr.LogSink {
	if l.name != "" {
		l.name = l.name + "/"
	}
	l.name += name
	return &l
}

func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink {
	l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
	return &l
}

func kvListToAttrs(kvList ...interface{}) []slog.Attr {
	// We don't need the record itself, only its Add method.
	record := slog.NewRecord(time.Time{}, 0, "", 0)
	record.Add(kvList...)
	attrs := make([]slog.Attr, 0, record.NumAttrs())
	record.Attrs(func(attr slog.Attr) bool {
		attrs = append(attrs, attr)
		return true
	})
	return attrs
}

26 vendor/github.com/google/cel-go/checker/cost.go generated vendored
@@ -533,14 +533,34 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args

	if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
		callEst := *est
		return CallEstimate{CostEstimate: callEst.Add(argCostSum())}
		return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
	}
	switch overloadID {
	// O(n) functions
	case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
		if overloadID == overloads.ExtFormatString {
	case overloads.ExtFormatString:
		if target != nil {
			// ResultSize not calculated because we can't bound the max size.
			return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
		}
	case overloads.StringToBytes:
		if len(args) == 1 {
			sz := c.sizeEstimate(args[0])
			// ResultSize max is when each char converts to 4 bytes.
			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
		}
	case overloads.BytesToString:
		if len(args) == 1 {
			sz := c.sizeEstimate(args[0])
			// ResultSize min is when 4 bytes convert to 1 char.
			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
		}
	case overloads.ExtQuoteString:
		if len(args) == 1 {
			sz := c.sizeEstimate(args[0])
			// ResultSize max is when each char is escaped. 2 quote chars always added.
			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
		}
	case overloads.StartsWithString, overloads.EndsWithString:
		if len(args) == 1 {
			return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
		}
38 vendor/github.com/google/go-cmp/cmp/compare.go generated vendored
@@ -5,7 +5,7 @@
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
// [reflect.DeepEqual] for comparing whether two values are semantically equal.
// It is intended to only be used in tests, as performance is not a goal and
// it may panic if it cannot compare the values. Its propensity towards
// panicking means that its unsuitable for production environments where a
@@ -18,16 +18,17 @@
// For example, an equality function may report floats as equal so long as
// they are within some tolerance of each other.
//
// - Types with an Equal method may use that method to determine equality.
//   This allows package authors to determine the equality operation
//   for the types that they define.
// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
//   to determine equality. This allows package authors to determine
//   the equality operation for the types that they define.
//
// - If no custom equality functions are used and no Equal method is defined,
//   equality is determined by recursively comparing the primitive kinds on
//   both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
//   both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
//   unexported fields are not compared by default; they result in panics
//   unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
//   or explicitly compared using the Exporter option.
//   unless suppressed by using an [Ignore] option
//   (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
//   or explicitly compared using the [Exporter] option.
package cmp

import (
@@ -45,14 +46,14 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// - Let S be the set of all Ignore, Transformer, and Comparer options that
// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
//   remain after applying all path filters, value filters, and type filters.
//   If at least one Ignore exists in S, then the comparison is ignored.
//   If the number of Transformer and Comparer options in S is non-zero,
//   If at least one [Ignore] exists in S, then the comparison is ignored.
//   If the number of [Transformer] and [Comparer] options in S is non-zero,
//   then Equal panics because it is ambiguous which option to use.
//   If S contains a single Transformer, then use that to transform
//   If S contains a single [Transformer], then use that to transform
//   the current values and recursively call Equal on the output values.
//   If S contains a single Comparer, then use that to compare the current values.
//   If S contains a single [Comparer], then use that to compare the current values.
//   Otherwise, evaluation proceeds to the next rule.
//
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
@@ -66,21 +67,22 @@ import (
//   Functions are only equal if they are both nil, otherwise they are unequal.
//
//   Structs are equal if recursively calling Equal on all fields report equal.
//   If a struct contains unexported fields, Equal panics unless an Ignore option
//   (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
//   explicitly permits comparing the unexported field.
//   If a struct contains unexported fields, Equal panics unless an [Ignore] option
//   (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
//   or the [Exporter] option explicitly permits comparing the unexported field.
//
//   Slices are equal if they are both nil or both non-nil, where recursively
//   calling Equal on all non-ignored slice or array elements report equal.
//   Empty non-nil slices and nil slices are not equal; to equate empty slices,
//   consider using cmpopts.EquateEmpty.
//   consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
//   Maps are equal if they are both nil or both non-nil, where recursively
//   calling Equal on all non-ignored map entries report equal.
//   Map keys are equal according to the == operator.
//   To use custom comparisons for map keys, consider using cmpopts.SortMaps.
//   To use custom comparisons for map keys, consider using
//   [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
//   Empty non-nil maps and nil maps are not equal; to equate empty maps,
//   consider using cmpopts.EquateEmpty.
//   consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
//   Pointers and interfaces are equal if they are both nil or both non-nil,
//   where they have the same underlying concrete type and recursively
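The go-cmp changes above only switch the doc comments to Go 1.19 doc-link syntax; the behaviour they describe is unchanged. A small self-contained sketch (not part of the diff) of the cmpopts options those comments mention:

	package main

	import (
		"fmt"

		"github.com/google/go-cmp/cmp"
		"github.com/google/go-cmp/cmp/cmpopts"
	)

	func main() {
		x := map[string][]int{"a": {}}
		y := map[string][]int{"a": nil}

		// By default, empty non-nil slices and nil slices are unequal.
		fmt.Println(cmp.Equal(x, y)) // false

		// cmpopts.EquateEmpty treats empty and nil collections as equal.
		fmt.Println(cmp.Equal(x, y, cmpopts.EquateEmpty())) // true
	}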
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

package cmp

import (
@@ -12,8 +9,6 @@ import (
	"unsafe"
)

const supportExporters = true

// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
// a struct such that the value has read-write permissions.
//

16 vendor/github.com/google/go-cmp/cmp/export_panic.go generated vendored
@@ -1,16 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

package cmp

import "reflect"

const supportExporters = false

func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
	panic("no support for forcibly accessing unexported fields")
}

@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

package value

import (

34 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go generated vendored
@@ -1,34 +0,0 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

package value

import "reflect"

// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
	p uintptr
	t reflect.Type
}

// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
	// NOTE: Storing a pointer as an uintptr is technically incorrect as it
	// assumes that the GC implementation does not use a moving collector.
	return Pointer{v.Pointer(), v.Type()}
}

// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
	return p.p == 0
}

// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
	return p.p
}

84 vendor/github.com/google/go-cmp/cmp/options.go generated vendored
@@ -13,15 +13,15 @@ import (
	"github.com/google/go-cmp/cmp/internal/function"
)

// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// Option configures for specific behavior of [Equal] and [Diff]. In particular,
// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
// The fundamental options may be composed with filters ([FilterPath] and
// [FilterValues]) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
// for creating options that may be used with [Equal] and [Diff].
type Option interface {
	// filter applies all filters and returns the option that remains.
	// Each option may only read s.curPath and call s.callTTBFunc.
@@ -56,9 +56,9 @@ type core struct{}

func (core) isCore() {}

// Options is a list of Option values that also satisfies the Option interface.
// Options is a list of [Option] values that also satisfies the [Option] interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// [Option] values into a single [Option]. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
@@ -105,16 +105,16 @@ func (opts Options) String() string {
	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}

// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
// FilterPath returns a new [Option] where opt is only evaluated if filter f
// returns true for the current [Path] in the value tree.
//
// This filter is called even if a slice element or map entry is missing and
// provides an opportunity to ignore such cases. The filter function must be
// symmetric such that the filter result is identical regardless of whether the
// missing value is from x or y.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterPath(f func(Path) bool, opt Option) Option {
	if f == nil {
		panic("invalid path filter function")
@@ -142,7 +142,7 @@ func (f pathFilter) String() string {
	return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}

// FilterValues returns a new Option where opt is only evaluated if filter f,
// FilterValues returns a new [Option] where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If either value is invalid or
|
||||
// the type of the values is not assignable to T, then this filter implicitly
|
||||
@@ -154,8 +154,8 @@ func (f pathFilter) String() string {
|
||||
// If T is an interface, it is possible that f is called with two values with
|
||||
// different concrete types that both implement T.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
|
||||
// a previously filtered [Option].
|
||||
func FilterValues(f interface{}, opt Option) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
||||
@@ -192,9 +192,9 @@ func (f valuesFilter) String() string {
|
||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
||||
}
|
||||
|
||||
// Ignore is an Option that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with FilterPath or FilterValues.
|
||||
// It is an error to pass an unfiltered Ignore option to Equal.
|
||||
// Ignore is an [Option] that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with [FilterPath] or [FilterValues].
|
||||
// It is an error to pass an unfiltered Ignore option to [Equal].
|
||||
func Ignore() Option { return ignore{} }
|
||||
|
||||
type ignore struct{ core }
|
||||
@@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
|
||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
||||
} else if t.Comparable() {
|
||||
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
|
||||
}
|
||||
} else {
|
||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
||||
@@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
||||
|
||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
|
||||
// Transformer returns an Option that applies a transformation function that
|
||||
// Transformer returns an [Option] that applies a transformation function that
|
||||
// converts values of a certain type into that of another.
|
||||
//
|
||||
// The transformer f must be a function "func(T) R" that converts values of
|
||||
@@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
// same transform to the output of itself (e.g., in the case where the
|
||||
// input and output types are the same), an implicit filter is added such that
|
||||
// a transformer is applicable only if that exact transformer is not already
|
||||
// in the tail of the Path since the last non-Transform step.
|
||||
// in the tail of the [Path] since the last non-[Transform] step.
|
||||
// For situations where the implicit filter is still insufficient,
|
||||
// consider using cmpopts.AcyclicTransformer, which adds a filter
|
||||
// to prevent the transformer from being recursively applied upon itself.
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
|
||||
// which adds a filter to prevent the transformer from
|
||||
// being recursively applied upon itself.
|
||||
//
|
||||
// The name is a user provided label that is used as the Transform.Name in the
|
||||
// transformation PathStep (and eventually shown in the Diff output).
|
||||
// The name is a user provided label that is used as the [Transform.Name] in the
|
||||
// transformation [PathStep] (and eventually shown in the [Diff] output).
|
||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
||||
// If empty, an arbitrary name is used.
|
||||
func Transformer(name string, f interface{}) Option {
|
||||
@@ -329,7 +332,7 @@ func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
|
||||
|
||||
// Comparer returns an Option that determines whether two values are equal
|
||||
// Comparer returns an [Option] that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
@@ -377,35 +380,32 @@ func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
|
||||
|
||||
// Exporter returns an Option that specifies whether Equal is allowed to
|
||||
// Exporter returns an [Option] that specifies whether [Equal] is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of Equal
|
||||
// implementation of some external package may cause the result of [Equal]
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom Comparer should be used instead that defines
|
||||
// In many cases, a custom [Comparer] should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the reflect.Type documentation defines equality to be determined
|
||||
// For example, the [reflect.Type] documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *regexp.Regexp types are interested
|
||||
// comparison) and most attempts to compare *[regexp.Regexp] types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using Comparers:
|
||||
// Both of these are accomplished using [Comparer] options:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
|
||||
// all unexported fields on specified struct types.
|
||||
// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
|
||||
// option can be used to ignore all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
if !supportExporters {
|
||||
panic("Exporter is not supported on purego builds")
|
||||
}
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
@@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an Options that allows Equal to forcibly introspect
|
||||
// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See Exporter for the proper use of this option.
|
||||
// See [Exporter] for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
@@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
|
||||
}
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Report (see Reporter).
|
||||
// is provided by cmp when calling Report (see [Reporter]).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
@@ -445,7 +445,7 @@ func (r Result) Equal() bool {
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if Equal reports false.
|
||||
// This never reports true if [Result.Equal] reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
@@ -455,7 +455,7 @@ func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a Comparer function determined equality.
|
||||
// ByFunc reports whether a [Comparer] function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
@@ -478,7 +478,7 @@ const (
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an Option that can be passed to Equal. When Equal traverses
|
||||
// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascend out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
|
||||
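The Comparer examples quoted in the options.go documentation above can be exercised directly. The following self-contained sketch (an illustration, not part of this change) compares reflect.Type values with == and *regexp.Regexp values by their source expressions:

```go
package main

import (
	"fmt"
	"reflect"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Shallow comparison of reflect.Type values via the == operator.
	typeCmp := cmp.Comparer(func(x, y reflect.Type) bool { return x == y })

	// Compare *regexp.Regexp values by their source expressions only.
	reCmp := cmp.Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })

	fmt.Println(cmp.Equal(reflect.TypeOf(0), reflect.TypeOf(1), typeCmp))             // true (both int)
	fmt.Println(cmp.Equal(regexp.MustCompile(`a+`), regexp.MustCompile(`a+`), reCmp)) // true
}
```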
46 vendor/github.com/google/go-cmp/cmp/path.go generated vendored
@@ -14,9 +14,9 @@ import (
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of PathSteps describing the sequence of operations to get
|
||||
// Path is a list of [PathStep] describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less PathStep that exists
|
||||
// The first Path element is always an operation-less [PathStep] that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
@@ -29,8 +29,13 @@ type Path []PathStep
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// these types as values of this type will be returned by this package.
|
||||
//
|
||||
// Implementations of this interface are
|
||||
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
|
||||
// Implementations of this interface:
|
||||
// - [StructField]
|
||||
// - [SliceIndex]
|
||||
// - [MapIndex]
|
||||
// - [Indirect]
|
||||
// - [TypeAssertion]
|
||||
// - [Transform]
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
@@ -70,8 +75,9 @@ func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last PathStep in the Path.
|
||||
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
|
||||
// Last returns the last [PathStep] in the Path.
|
||||
// If the path is empty, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
@@ -79,7 +85,8 @@ func (pa Path) Last() PathStep {
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
|
||||
// If index is invalid, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
@@ -168,7 +175,8 @@ func (ps pathStep) String() string {
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField represents a struct field access on a field called Name.
|
||||
// StructField is a [PathStep] that represents a struct field access
|
||||
// on a field called [StructField.Name].
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
@@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See reflect.Type.Field.
|
||||
// See [reflect.Type.Field].
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is an index operation on a slice or array at some index Key.
|
||||
// SliceIndex is a [PathStep] that represents an index operation on
|
||||
// a slice or array at some index [SliceIndex.Key].
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
@@ -247,12 +256,12 @@ func (si SliceIndex) Key() int {
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// Key is guaranteed to return -1 if and only if the indexes returned
|
||||
// by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
|
||||
// returned by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
|
||||
|
||||
// MapIndex is an index operation on a map at some index Key.
|
||||
// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
@@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]",
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect represents pointer indirection on the parent type.
|
||||
// Indirect is a [PathStep] that represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
@@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion represents a type assertion on an interface.
|
||||
// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
@@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||
|
||||
// Transform is a transformation from the parent type to the current type.
|
||||
// Transform is a [PathStep] that represents a transformation
|
||||
// from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
@@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the Transformer.
|
||||
// Name is the name of the [Transformer].
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed Transformer option.
|
||||
// Option returns the originally constructed [Transformer] option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
|
||||
|
||||
|
||||
2 vendor/github.com/google/go-cmp/cmp/report_reflect.go generated vendored
@@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if supportExporters && !isExported(sf.Name) {
|
||||
if !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
|
||||
9 vendor/github.com/google/uuid/.travis.yml generated vendored
@@ -1,9 +0,0 @@
language: go

go:
- 1.4.3
- 1.5.3
- tip

script:
- go test -v ./...
10 vendor/github.com/google/uuid/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,10 @@
# Changelog

## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)


### Bug Fixes

* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))

## Changelog
16 vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored
@@ -2,6 +2,22 @@

We definitely welcome patches and contribution to this project!

### Tips

Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).

Always try to include a test case! If it is not possible or not necessary,
please explain why in the pull request description.

### Releasing

Commits that would precipitate a SemVer change, as desrcibed in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.

For tips on how to work with `release-please`, see its documentation.

### Legal requirements

In order to protect both you and ourselves, you will need to sign the
10 vendor/github.com/google/uuid/README.md generated vendored
@@ -1,6 +1,6 @@
# uuid 
# uuid
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.

This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).

###### Install
`go get github.com/google/uuid`
```sh
go get github.com/google/uuid
```

###### Documentation
[](http://godoc.org/github.com/google/uuid)
[](https://pkg.go.dev/github.com/google/uuid)

Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
2 vendor/github.com/google/uuid/node_js.go generated vendored
@@ -7,6 +7,6 @@
package uuid

// getHardwareInterface returns nil values for the JS version of the code.
// This remvoves the "net" dependency, because it is not used in the browser.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
10 vendor/github.com/google/uuid/uuid.go generated vendored
@@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) {
|
||||
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9:
|
||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
||||
if !strings.EqualFold(s[:9], "urn:uuid:") {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
@@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) {
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(s[x], s[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
@@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) {
|
||||
switch len(b) {
|
||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
|
||||
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||
}
|
||||
b = b[9:]
|
||||
@@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) {
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(b[x], b[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
|
||||
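The uuid.Parse hunks above swap strings.ToLower for strings.EqualFold, which keeps the case-insensitive "urn:uuid:" prefix check while avoiding the lowercased copy of the input. A small usage sketch (illustrative only; the UUID value is made up):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Hypothetical UUID value used only for illustration.
	const id = "f47ac10b-58cc-0372-8567-0e02b2c3d479"

	// Both prefix spellings parse; EqualFold checks the prefix without
	// allocating a lowercased copy of the input.
	u1, err1 := uuid.Parse("urn:uuid:" + id)
	u2, err2 := uuid.Parse("URN:UUID:" + id)

	fmt.Println(u1, err1)
	fmt.Println(u2, err2)
}
```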
4 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go generated vendored
@@ -1,10 +1,10 @@
//go:build gofuzz
// +build gofuzz

package httprule

func Fuzz(data []byte) int {
_, err := Parse(string(data))
if err != nil {
if _, err := Parse(string(data)); err != nil {
return 0
}
return 0
30 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go generated vendored
@@ -1,6 +1,7 @@
|
||||
package httprule
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
@@ -164,9 +165,9 @@ func (p *parser) segment() (segment, error) {
|
||||
|
||||
v, err := p.variable()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
|
||||
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
|
||||
}
|
||||
return v, err
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (p *parser) literal() (segment, error) {
|
||||
@@ -191,7 +192,7 @@ func (p *parser) variable() (segment, error) {
|
||||
if _, err := p.accept("="); err == nil {
|
||||
segs, err = p.segments()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
|
||||
return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
|
||||
}
|
||||
} else {
|
||||
segs = []segment{wildcard{}}
|
||||
@@ -213,12 +214,12 @@ func (p *parser) fieldPath() (string, error) {
|
||||
}
|
||||
components := []string{c}
|
||||
for {
|
||||
if _, err = p.accept("."); err != nil {
|
||||
if _, err := p.accept("."); err != nil {
|
||||
return strings.Join(components, "."), nil
|
||||
}
|
||||
c, err := p.accept(typeIdent)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid field path component: %v", err)
|
||||
return "", fmt.Errorf("invalid field path component: %w", err)
|
||||
}
|
||||
components = append(components, c)
|
||||
}
|
||||
@@ -237,10 +238,8 @@ const (
|
||||
typeEOF = termType("$")
|
||||
)
|
||||
|
||||
const (
|
||||
// eof is the terminal symbol which always appears at the end of token sequence.
|
||||
eof = "\u0000"
|
||||
)
|
||||
// eof is the terminal symbol which always appears at the end of token sequence.
|
||||
const eof = "\u0000"
|
||||
|
||||
// accept tries to accept a token in "p".
|
||||
// This function consumes a token and returns it if it matches to the specified "term".
|
||||
@@ -275,11 +274,12 @@ func (p *parser) accept(term termType) (string, error) {
|
||||
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
||||
//
|
||||
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
//
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
func expectPChars(t string) error {
|
||||
const (
|
||||
init = iota
|
||||
@@ -333,7 +333,7 @@ func expectPChars(t string) error {
|
||||
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
|
||||
func expectIdent(ident string) error {
|
||||
if ident == "" {
|
||||
return fmt.Errorf("empty identifier")
|
||||
return errors.New("empty identifier")
|
||||
}
|
||||
for pos, r := range ident {
|
||||
switch {
|
||||
|
||||
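The parse.go hunks above replace %v with %w in fmt.Errorf calls, so the underlying parse errors remain matchable with errors.Is and errors.As. A generic sketch of the difference (errBadToken is a hypothetical stand-in for the wrapped error, not a symbol from this package):

```go
package main

import (
	"errors"
	"fmt"
)

// errBadToken is a hypothetical stand-in for a lower-level parse error.
var errBadToken = errors.New("bad token")

func parseField() error {
	// %w wraps the underlying error; with %v the error chain would be lost.
	return fmt.Errorf("invalid field path component: %w", errBadToken)
}

func main() {
	err := parseField()
	fmt.Println(errors.Is(err, errBadToken)) // true only because of %w
}
```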
10 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel generated vendored
@@ -27,9 +27,9 @@ go_library(
|
||||
"//internal/httprule",
|
||||
"//utilities",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//grpclog",
|
||||
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
@@ -37,6 +37,8 @@ go_library(
|
||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||
"@org_golang_google_protobuf//reflect/protoregistry",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/fieldmaskpb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
],
|
||||
@@ -56,8 +58,10 @@ go_test(
|
||||
"marshal_jsonpb_test.go",
|
||||
"marshal_proto_test.go",
|
||||
"marshaler_registry_test.go",
|
||||
"mux_internal_test.go",
|
||||
"mux_test.go",
|
||||
"pattern_test.go",
|
||||
"query_fuzz_test.go",
|
||||
"query_test.go",
|
||||
],
|
||||
embed = [":runtime"],
|
||||
@@ -69,8 +73,9 @@ go_test(
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
||||
"@go_googleapis//google/rpc:status_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
@@ -78,6 +83,7 @@ go_test(
|
||||
"@org_golang_google_protobuf//testing/protocmp",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/emptypb",
|
||||
"@org_golang_google_protobuf//types/known/fieldmaskpb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
|
||||
72 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go generated vendored
@@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@@ -35,11 +36,15 @@ const metadataHeaderBinarySuffix = "-Bin"
|
||||
const xForwardedFor = "X-Forwarded-For"
|
||||
const xForwardedHost = "X-Forwarded-Host"
|
||||
|
||||
var (
|
||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||
DefaultContextTimeout = 0 * time.Second
|
||||
)
|
||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||
var DefaultContextTimeout = 0 * time.Second
|
||||
|
||||
// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
|
||||
// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
|
||||
var malformedHTTPHeaders = map[string]struct{}{
|
||||
"connection": {},
|
||||
}
|
||||
|
||||
type (
|
||||
rpcMethodKey struct{}
|
||||
@@ -95,12 +100,43 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque
|
||||
return metadata.NewIncomingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
func isValidGRPCMetadataKey(key string) bool {
|
||||
// Must be a valid gRPC "Header-Name" as defined here:
|
||||
// https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
|
||||
// This means 0-9 a-z _ - .
|
||||
// Only lowercase letters are valid in the wire protocol, but the client library will normalize
|
||||
// uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
|
||||
bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
|
||||
for _, ch := range bytes {
|
||||
validLowercaseLetter := ch >= 'a' && ch <= 'z'
|
||||
validUppercaseLetter := ch >= 'A' && ch <= 'Z'
|
||||
validDigit := ch >= '0' && ch <= '9'
|
||||
validOther := ch == '.' || ch == '-' || ch == '_'
|
||||
if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isValidGRPCMetadataTextValue(textValue string) bool {
|
||||
// Must be a valid gRPC "ASCII-Value" as defined here:
|
||||
// https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
|
||||
// This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive.
|
||||
bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
|
||||
for _, ch := range bytes {
|
||||
if ch < 0x20 || ch > 0x7E {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
|
||||
ctx = withRPCMethod(ctx, rpcMethodName)
|
||||
for _, o := range options {
|
||||
ctx = o(ctx)
|
||||
}
|
||||
var pairs []string
|
||||
timeout := DefaultContextTimeout
|
||||
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
||||
var err error
|
||||
@@ -109,7 +145,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
||||
}
|
||||
}
|
||||
|
||||
var pairs []string
|
||||
for key, vals := range req.Header {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
for _, val := range vals {
|
||||
@@ -118,6 +154,10 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
|
||||
pairs = append(pairs, "authorization", val)
|
||||
}
|
||||
if h, ok := mux.incomingHeaderMatcher(key); ok {
|
||||
if !isValidGRPCMetadataKey(h) {
|
||||
grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
|
||||
continue
|
||||
}
|
||||
// Handles "-bin" metadata in grpc, since grpc will do another base64
|
||||
// encode before sending to server, we need to decode it first.
|
||||
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
|
||||
@@ -127,6 +167,9 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
|
||||
}
|
||||
|
||||
val = string(b)
|
||||
} else if !isValidGRPCMetadataTextValue(val) {
|
||||
grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
|
||||
continue
|
||||
}
|
||||
pairs = append(pairs, h, val)
|
||||
}
|
||||
@@ -172,11 +215,17 @@ type serverMetadataKey struct{}
|
||||
|
||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||
}
|
||||
|
||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||
if ctx == nil {
|
||||
return md, false
|
||||
}
|
||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||
return
|
||||
}
|
||||
@@ -269,8 +318,8 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||
case 'n':
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||
@@ -308,6 +357,13 @@ func isPermanentHTTPHeader(hdr string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// isMalformedHTTPHeader checks whether header belongs to the list of
|
||||
// "malformed headers" and would be rejected by the gRPC server.
|
||||
func isMalformedHTTPHeader(header string) bool {
|
||||
_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
|
||||
return isMalformed
|
||||
}
|
||||
|
||||
// RPCMethod returns the method string for the server context. The returned
|
||||
// string is in the format of "/package.service/method".
|
||||
func RPCMethod(ctx context.Context) (string, bool) {
|
||||
|
||||
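The new isValidGRPCMetadataKey check in context.go only accepts ASCII letters, digits, '.', '-' and '_'. The standalone sketch below mirrors that rule for illustration; it is not the vendored function itself:

```go
package main

import "fmt"

// isValidGRPCMetadataKey mirrors the rule added in runtime/context.go:
// a gRPC "Header-Name" may only contain ASCII letters, digits, '.', '-' and '_'.
func isValidGRPCMetadataKey(key string) bool {
	for _, ch := range []byte(key) {
		switch {
		case ch >= 'a' && ch <= 'z',
			ch >= 'A' && ch <= 'Z',
			ch >= '0' && ch <= '9',
			ch == '.', ch == '-', ch == '_':
			// valid byte, keep scanning
		default:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isValidGRPCMetadataKey("x-request-id")) // true
	fmt.Println(isValidGRPCMetadataKey("bad header"))   // false: space is rejected
}
```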
46 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go generated vendored
@@ -37,7 +37,7 @@ func BoolSlice(val, sep string) ([]bool, error) {
|
||||
for i, v := range s {
|
||||
value, err := Bool(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -57,7 +57,7 @@ func Float64Slice(val, sep string) ([]float64, error) {
|
||||
for i, v := range s {
|
||||
value, err := Float64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func Float32Slice(val, sep string) ([]float32, error) {
|
||||
for i, v := range s {
|
||||
value, err := Float32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -101,7 +101,7 @@ func Int64Slice(val, sep string) ([]int64, error) {
|
||||
for i, v := range s {
|
||||
value, err := Int64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -125,7 +125,7 @@ func Int32Slice(val, sep string) ([]int32, error) {
|
||||
for i, v := range s {
|
||||
value, err := Int32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -145,7 +145,7 @@ func Uint64Slice(val, sep string) ([]uint64, error) {
|
||||
for i, v := range s {
|
||||
value, err := Uint64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -169,7 +169,7 @@ func Uint32Slice(val, sep string) ([]uint32, error) {
|
||||
for i, v := range s {
|
||||
value, err := Uint32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -197,7 +197,7 @@ func BytesSlice(val, sep string) ([][]byte, error) {
|
||||
for i, v := range s {
|
||||
value, err := Bytes(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
@@ -209,8 +209,7 @@ func Timestamp(val string) (*timestamppb.Timestamp, error) {
|
||||
var r timestamppb.Timestamp
|
||||
val = strconv.Quote(strings.Trim(val, `"`))
|
||||
unmarshaler := &protojson.UnmarshalOptions{}
|
||||
err := unmarshaler.Unmarshal([]byte(val), &r)
|
||||
if err != nil {
|
||||
if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
@@ -221,8 +220,7 @@ func Duration(val string) (*durationpb.Duration, error) {
|
||||
var r durationpb.Duration
|
||||
val = strconv.Quote(strings.Trim(val, `"`))
|
||||
unmarshaler := &protojson.UnmarshalOptions{}
|
||||
err := unmarshaler.Unmarshal([]byte(val), &r)
|
||||
if err != nil {
|
||||
if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
@@ -257,66 +255,64 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
||||
for i, v := range s {
|
||||
value, err := Enum(v, enumValMap)
|
||||
if err != nil {
|
||||
return values, err
|
||||
return nil, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Support fot google.protobuf.wrappers on top of primitive types
|
||||
*/
|
||||
// Support for google.protobuf.wrappers on top of primitive types
|
||||
|
||||
// StringValue well-known type support as wrapper around string type
|
||||
func StringValue(val string) (*wrapperspb.StringValue, error) {
|
||||
return &wrapperspb.StringValue{Value: val}, nil
|
||||
return wrapperspb.String(val), nil
|
||||
}
|
||||
|
||||
// FloatValue well-known type support as wrapper around float32 type
|
||||
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
|
||||
parsedVal, err := Float32(val)
|
||||
return &wrapperspb.FloatValue{Value: parsedVal}, err
|
||||
return wrapperspb.Float(parsedVal), err
|
||||
}
|
||||
|
||||
// DoubleValue well-known type support as wrapper around float64 type
|
||||
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
|
||||
parsedVal, err := Float64(val)
|
||||
return &wrapperspb.DoubleValue{Value: parsedVal}, err
|
||||
return wrapperspb.Double(parsedVal), err
|
||||
}
|
||||
|
||||
// BoolValue well-known type support as wrapper around bool type
|
||||
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
|
||||
parsedVal, err := Bool(val)
|
||||
return &wrapperspb.BoolValue{Value: parsedVal}, err
|
||||
return wrapperspb.Bool(parsedVal), err
|
||||
}
|
||||
|
||||
// Int32Value well-known type support as wrapper around int32 type
|
||||
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
|
||||
parsedVal, err := Int32(val)
|
||||
return &wrapperspb.Int32Value{Value: parsedVal}, err
|
||||
return wrapperspb.Int32(parsedVal), err
|
||||
}
|
||||
|
||||
// UInt32Value well-known type support as wrapper around uint32 type
|
||||
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
|
||||
parsedVal, err := Uint32(val)
|
||||
return &wrapperspb.UInt32Value{Value: parsedVal}, err
|
||||
return wrapperspb.UInt32(parsedVal), err
|
||||
}
|
||||
|
||||
// Int64Value well-known type support as wrapper around int64 type
|
||||
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
|
||||
parsedVal, err := Int64(val)
|
||||
return &wrapperspb.Int64Value{Value: parsedVal}, err
|
||||
return wrapperspb.Int64(parsedVal), err
|
||||
}
|
||||
|
||||
// UInt64Value well-known type support as wrapper around uint64 type
|
||||
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
|
||||
parsedVal, err := Uint64(val)
|
||||
return &wrapperspb.UInt64Value{Value: parsedVal}, err
|
||||
return wrapperspb.UInt64(parsedVal), err
|
||||
}
|
||||
|
||||
// BytesValue well-known type support as wrapper around bytes[] type
|
||||
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
|
||||
parsedVal, err := Bytes(val)
|
||||
return &wrapperspb.BytesValue{Value: parsedVal}, err
|
||||
return wrapperspb.Bytes(parsedVal), err
|
||||
}
|
||||
|
||||
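The convert.go hunks switch from wrapper struct literals to the wrapperspb constructor helpers. A minimal sketch of the equivalent call sites (illustrative usage only):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrapperspb.String("x") is equivalent to &wrapperspb.StringValue{Value: "x"},
	// and likewise for the other wrapper constructors used in convert.go.
	s := wrapperspb.String("hello")
	b := wrapperspb.Bool(true)
	i := wrapperspb.Int32(42)

	fmt.Println(s.GetValue(), b.GetValue(), i.GetValue()) // hello true 42
}
```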
17 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go generated vendored
@@ -38,7 +38,7 @@ func HTTPStatusFromCode(code codes.Code) int {
|
||||
case codes.OK:
|
||||
return http.StatusOK
|
||||
case codes.Canceled:
|
||||
return http.StatusRequestTimeout
|
||||
return 499
|
||||
case codes.Unknown:
|
||||
return http.StatusInternalServerError
|
||||
case codes.InvalidArgument:
|
||||
@@ -70,10 +70,10 @@ func HTTPStatusFromCode(code codes.Code) int {
|
||||
return http.StatusServiceUnavailable
|
||||
case codes.DataLoss:
|
||||
return http.StatusInternalServerError
|
||||
default:
|
||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
// HTTPError uses the mux-configured error handler.
|
||||
@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
|
||||
|
||||
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
||||
// By default http error codes mapped on the following error codes:
|
||||
// NotFound -> grpc.NotFound
|
||||
// StatusBadRequest -> grpc.InvalidArgument
|
||||
// MethodNotAllowed -> grpc.Unimplemented
|
||||
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||
//
|
||||
// NotFound -> grpc.NotFound
|
||||
// StatusBadRequest -> grpc.InvalidArgument
|
||||
// MethodNotAllowed -> grpc.Unimplemented
|
||||
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
||||
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
||||
switch httpStatus {
|
||||
|
||||
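With the errors.go change above, runtime.HTTPStatusFromCode maps codes.Canceled to the non-standard 499 "client closed request" status rather than 408. A quick check (illustrative usage, assuming grpc-gateway v2 is on the module path):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
)

func main() {
	// Canceled now maps to 499 instead of 408 Request Timeout.
	fmt.Println(runtime.HTTPStatusFromCode(codes.Canceled)) // 499
	fmt.Println(runtime.HTTPStatusFromCode(codes.NotFound)) // 404
}
```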
9 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go generated vendored
@@ -2,13 +2,14 @@ package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
)
|
||||
|
||||
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
|
||||
@@ -44,7 +45,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
|
||||
// if the item is an object, then enqueue all of its children
|
||||
for k, v := range m {
|
||||
if item.msg == nil {
|
||||
return nil, fmt.Errorf("JSON structure did not match request type")
|
||||
return nil, errors.New("JSON structure did not match request type")
|
||||
}
|
||||
|
||||
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
|
||||
@@ -53,7 +54,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
|
||||
}
|
||||
|
||||
if isDynamicProtoMessage(fd.Message()) {
|
||||
for _, p := range buildPathsBlindly(k, v) {
|
||||
for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
|
||||
newPath := p
|
||||
if item.path != "" {
|
||||
newPath = item.path + "." + newPath
|
||||
@@ -63,7 +64,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
|
||||
continue
|
||||
}
|
||||
|
||||
if isProtobufAnyMessage(fd.Message()) {
|
||||
if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
|
||||
_, hasTypeField := v.(map[string]interface{})["@type"]
|
||||
if hasTypeField {
|
||||
queue = append(queue, fieldMaskPathItem{path: k})
|
||||
|
||||
26 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go generated vendored
@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||
return
|
||||
}
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -82,15 +82,15 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
|
||||
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||
return
|
||||
}
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to send response chunk: %v", err)
|
||||
return
|
||||
}
|
||||
wroteHeader = true
|
||||
if _, err = w.Write(delimiter); err != nil {
|
||||
if _, err := w.Write(delimiter); err != nil {
|
||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||
return
|
||||
}
|
||||
@@ -200,20 +200,24 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
|
||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
|
||||
st := mux.streamErrorHandler(ctx, err)
|
||||
msg := errorChunk(st)
|
||||
if !wroteHeader {
|
||||
w.Header().Set("Content-Type", marshaler.ContentType(msg))
|
||||
w.WriteHeader(HTTPStatusFromCode(st.Code()))
|
||||
}
|
||||
buf, merr := marshaler.Marshal(msg)
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal an error: %v", merr)
|
||||
buf, err := marshaler.Marshal(msg)
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to marshal an error: %v", err)
|
||||
return
|
||||
}
|
||||
if _, werr := w.Write(buf); werr != nil {
|
||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to notify error to client: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(delimiter); err != nil {
|
||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
38 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go generated vendored
@@ -92,23 +92,20 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
|
||||
if rv.Type().Elem().Implements(protoMessageType) {
|
||||
var buf bytes.Buffer
|
||||
err := buf.WriteByte('[')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte('['); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if i != 0 {
|
||||
err = buf.WriteByte(',')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte(','); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
||||
if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = buf.WriteByte(']')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte(']'); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -117,17 +114,16 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
|
||||
if rv.Type().Elem().Implements(typeProtoEnum) {
|
||||
var buf bytes.Buffer
|
||||
err := buf.WriteByte('[')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte('['); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if i != 0 {
|
||||
err = buf.WriteByte(',')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte(','); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if j.UseEnumNumbers {
|
||||
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
|
||||
} else {
|
||||
@@ -137,8 +133,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = buf.WriteByte(']')
|
||||
if err != nil {
|
||||
if err := buf.WriteByte(']'); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -219,8 +214,7 @@ func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v int
|
||||
|
||||
// Decode into bytes for marshalling
|
||||
var b json.RawMessage
|
||||
err := d.Decode(&b)
|
||||
if err != nil {
|
||||
if err := d.Decode(&b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -239,8 +233,7 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
|
||||
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
||||
// Decode into bytes for marshalling
|
||||
var b json.RawMessage
|
||||
err := d.Decode(&b)
|
||||
if err != nil {
|
||||
if err := d.Decode(&b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -280,6 +273,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
|
||||
return nil
|
||||
}
|
||||
if rv.Kind() == reflect.Slice {
|
||||
if rv.Type().Elem().Kind() == reflect.Uint8 {
|
||||
var sl []byte
|
||||
if err := d.Decode(&sl); err != nil {
|
||||
return err
|
||||
}
|
||||
if sl != nil {
|
||||
rv.SetBytes(sl)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var sl []json.RawMessage
|
||||
if err := d.Decode(&sl); err != nil {
|
||||
return err
|
||||
|
||||
9 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go generated vendored
@@ -1,10 +1,8 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -38,7 +36,7 @@ func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
|
||||
// NewDecoder returns a Decoder which reads proto stream from "reader".
|
||||
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
|
||||
return DecoderFunc(func(value interface{}) error {
|
||||
buffer, err := ioutil.ReadAll(reader)
|
||||
buffer, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -53,8 +51,7 @@ func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(buffer)
|
||||
if err != nil {
|
||||
if _, err := writer.Write(buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
162 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go generated vendored
@@ -6,10 +6,13 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -23,15 +26,15 @@ const (
|
||||
// path string before doing any routing.
|
||||
UnescapingModeLegacy UnescapingMode = iota
|
||||
|
||||
// EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
|
||||
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
|
||||
// reserved characters.
|
||||
UnescapingModeAllExceptReserved
|
||||
|
||||
// EscapingTypeExceptSlash unescapes URL path parameters except path
|
||||
// seperators, which will be left as "%2F".
|
||||
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
|
||||
// separators, which will be left as "%2F".
|
||||
UnescapingModeAllExceptSlash
|
||||
|
||||
// URL path parameters will be fully decoded.
|
||||
// UnescapingModeAllCharacters unescapes all URL path parameters.
|
||||
UnescapingModeAllCharacters
|
||||
|
||||
// UnescapingModeDefault is the default escaping type.
|
||||
@@ -40,6 +43,8 @@ const (
|
||||
UnescapingModeDefault = UnescapingModeLegacy
|
||||
)
|
||||
|
||||
var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
|
||||
|
||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||
|
||||
@@ -75,7 +80,7 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http.
|
||||
}
|
||||
}
|
||||
|
||||
// WithEscapingType sets the escaping type. See the definitions of UnescapingMode
|
||||
// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
|
||||
// for more information.
|
||||
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
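For context, a minimal sketch of opting into one of the unescaping modes introduced above. Only the mode and option names come from the hunks here; the `package main` wrapper and the idea of registering handlers afterwards are illustrative:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Keep RFC 6570 reserved characters (such as an encoded "/") escaped
	// inside path parameters instead of fully unescaping them.
	mux := runtime.NewServeMux(
		runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptReserved),
	)
	_ = mux // generated gateway handlers would be registered on mux here
}
```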
@@ -96,13 +101,14 @@ func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMux
|
||||
type HeaderMatcherFunc func(string) (string, bool)
|
||||
|
||||
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
||||
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
|
||||
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
|
||||
// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
|
||||
// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
|
||||
// Other headers are not added to the gRPC metadata.
|
||||
func DefaultHeaderMatcher(key string) (string, bool) {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
if isPermanentHTTPHeader(key) {
|
||||
switch key = textproto.CanonicalMIMEHeaderKey(key); {
|
||||
case isPermanentHTTPHeader(key):
|
||||
return MetadataPrefix + key, true
|
||||
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||
case strings.HasPrefix(key, MetadataHeaderPrefix):
|
||||
return key[len(MetadataHeaderPrefix):], true
|
||||
}
|
||||
return "", false
|
||||
@@ -113,11 +119,30 @@ func DefaultHeaderMatcher(key string) (string, bool) {
|
||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
for _, header := range fn.matchedMalformedHeaders() {
|
||||
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
|
||||
}
|
||||
|
||||
return func(mux *ServeMux) {
|
||||
mux.incomingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
|
||||
func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
|
||||
if fn == nil {
|
||||
return nil
|
||||
}
|
||||
headers := make([]string, 0)
|
||||
for header := range malformedHTTPHeaders {
|
||||
out, accept := fn(header)
|
||||
if accept && isMalformedHTTPHeader(out) {
|
||||
headers = append(headers, out)
|
||||
}
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||
//
|
||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||
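A possible usage sketch for the incoming header matcher described above; the `X-Request-Id` header is only an example, and the fallback mirrors the documented `DefaultHeaderMatcher` behavior (permanent IANA headers plus `Grpc-Metadata-*`):

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Forward an illustrative X-Request-Id header to gRPC metadata and defer
	// to DefaultHeaderMatcher for everything else.
	matcher := func(key string) (string, bool) {
		if key == "X-Request-Id" {
			return key, true
		}
		return runtime.DefaultHeaderMatcher(key)
	}
	mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(matcher))
	_ = mux
}
```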
@@ -179,6 +204,56 @@ func WithDisablePathLengthFallback() ServeMuxOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
|
||||
// When called the handler will forward the request to the upstream grpc service health check (defined in the
|
||||
// gRPC Health Checking Protocol).
|
||||
//
|
||||
// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
|
||||
// to setup the protocol in the grpc server.
|
||||
//
|
||||
// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
|
||||
func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
|
||||
return func(s *ServeMux) {
|
||||
// error can be ignored since pattern is definitely valid
|
||||
_ = s.HandlePath(
|
||||
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
|
||||
) {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
|
||||
resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
|
||||
Service: r.URL.Query().Get("service"),
|
||||
})
|
||||
if err != nil {
|
||||
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
|
||||
switch resp.GetStatus() {
|
||||
case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
|
||||
err = status.Error(codes.Unavailable, resp.String())
|
||||
case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
|
||||
err = status.Error(codes.NotFound, resp.String())
|
||||
}
|
||||
|
||||
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
_ = outboundMarshaler.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
|
||||
//
|
||||
// See WithHealthEndpointAt for the general implementation.
|
||||
func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
|
||||
return WithHealthEndpointAt(healthCheckClient, "/healthz")
|
||||
}
|
||||
|
||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||
serveMux := &ServeMux{
|
||||
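A sketch of wiring the new health endpoint option to an upstream server. The dial target and listen address are placeholders, and the upstream is assumed to implement the gRPC Health Checking Protocol:

```go
package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Placeholder upstream address; the upstream must serve grpc.health.v1.Health.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// GET /healthz on the gateway now proxies the upstream health check;
	// a ?service=<name> query parameter is forwarded in the HealthCheckRequest.
	mux := runtime.NewServeMux(
		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
	)
	_ = http.ListenAndServe(":8080", mux)
}
```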
@@ -229,7 +304,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
|
||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
@@ -245,8 +320,6 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
path = r.URL.RawPath
|
||||
}
|
||||
|
||||
components := strings.Split(path[1:], "/")
|
||||
|
||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||
r.Method = strings.ToUpper(override)
|
||||
if err := r.ParseForm(); err != nil {
|
||||
@@ -257,8 +330,18 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Verb out here is to memoize for the fallback case below
|
||||
var verb string
|
||||
var pathComponents []string
|
||||
// since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
|
||||
// in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
|
||||
// path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
|
||||
// behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
|
||||
if s.unescapingMode == UnescapingModeAllCharacters {
|
||||
pathComponents = encodedPathSplitter.Split(path[1:], -1)
|
||||
} else {
|
||||
pathComponents = strings.Split(path[1:], "/")
|
||||
}
|
||||
|
||||
lastPathComponent := pathComponents[len(pathComponents)-1]
|
||||
|
||||
for _, h := range s.handlers[r.Method] {
|
||||
// If the pattern has a verb, explicitly look for a suffix in the last
|
||||
@@ -269,23 +352,28 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// parser because we know what verb we're looking for, however, there
|
||||
// are still some cases that the parser itself cannot disambiguate. See
|
||||
// the comment there if interested.
|
||||
|
||||
var verb string
|
||||
patVerb := h.pat.Verb()
|
||||
l := len(components)
|
||||
lastComponent := components[l-1]
|
||||
var idx int = -1
|
||||
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
|
||||
idx = len(lastComponent) - len(patVerb) - 1
|
||||
|
||||
idx := -1
|
||||
if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
|
||||
idx = len(lastPathComponent) - len(patVerb) - 1
|
||||
}
|
||||
if idx == 0 {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
comps := make([]string, len(pathComponents))
|
||||
copy(comps, pathComponents)
|
||||
|
||||
if idx > 0 {
|
||||
components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
|
||||
comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
|
||||
}
|
||||
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
|
||||
if err != nil {
|
||||
var mse MalformedSequenceError
|
||||
if ok := errors.As(err, &mse); ok {
|
||||
@@ -301,14 +389,33 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// lookup other methods to handle fallback from GET to POST and
|
||||
// to determine if it is NotImplemented or NotFound.
|
||||
// if no handler has found for the request, lookup for other methods
|
||||
// to handle POST -> GET fallback if the request is subject to path
|
||||
// length fallback.
|
||||
// Note we are not eagerly checking the request here as we want to return the
|
||||
// right HTTP status code, and we need to process the fallback candidates in
|
||||
// order to do that.
|
||||
for m, handlers := range s.handlers {
|
||||
if m == r.Method {
|
||||
continue
|
||||
}
|
||||
for _, h := range handlers {
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
var verb string
|
||||
patVerb := h.pat.Verb()
|
||||
|
||||
idx := -1
|
||||
if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
|
||||
idx = len(lastPathComponent) - len(patVerb) - 1
|
||||
}
|
||||
|
||||
comps := make([]string, len(pathComponents))
|
||||
copy(comps, pathComponents)
|
||||
|
||||
if idx > 0 {
|
||||
comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
|
||||
}
|
||||
|
||||
pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
|
||||
if err != nil {
|
||||
var mse MalformedSequenceError
|
||||
if ok := errors.As(err, &mse); ok {
|
||||
@@ -320,8 +427,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
||||
if s.isPathLengthFallback(r) {
|
||||
// Also, only consider POST -> GET fallbacks, and avoid falling back to
|
||||
// potentially dangerous operations like DELETE.
|
||||
if s.isPathLengthFallback(r) && m == http.MethodGet {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
|
||||
2 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go (generated, vendored)
@@ -15,8 +15,6 @@ var (
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
// ErrMalformedSequence indicates that an escape sequence was malformed.
ErrMalformedSequence = errors.New("malformed escape sequence")
)

type MalformedSequenceError string
69 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go (generated, vendored)
@@ -1,7 +1,6 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
@@ -11,19 +10,21 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||
)
|
||||
|
||||
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
|
||||
|
||||
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
|
||||
var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
|
||||
|
||||
// QueryParameterParser defines interface for all query parameter parsers
|
||||
type QueryParameterParser interface {
|
||||
@@ -36,14 +37,17 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili
|
||||
return currentQueryParser.Parse(msg, values, filter)
|
||||
}
|
||||
|
||||
type defaultQueryParser struct{}
|
||||
// DefaultQueryParser is a QueryParameterParser which implements the default
|
||||
// query parameters parsing behavior.
|
||||
//
|
||||
// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
|
||||
type DefaultQueryParser struct{}
|
||||
|
||||
// Parse populates "values" into "msg".
|
||||
// A value is ignored if its key starts with one of the elements in "filter".
|
||||
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
for key, values := range values {
|
||||
match := valuesKeyRegexp.FindStringSubmatch(key)
|
||||
if len(match) == 3 {
|
||||
if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
|
||||
key = match[1]
|
||||
values = append([]string{match[2]}, values...)
|
||||
}
|
||||
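As a usage sketch, the now-exported default parser can be installed explicitly, or replaced by a custom `QueryParameterParser`; the `SetQueryParameterParser` option name comes from the hunk context above:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Install the exported DefaultQueryParser explicitly; a custom
	// QueryParameterParser implementation could be passed here instead.
	mux := runtime.NewServeMux(
		runtime.SetQueryParameterParser(&runtime.DefaultQueryParser{}),
	)
	_ = mux
}
```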
@@ -175,10 +179,10 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
|
||||
return protoreflect.ValueOfBool(v), nil
|
||||
case protoreflect.EnumKind:
|
||||
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
|
||||
switch {
|
||||
case errors.Is(err, protoregistry.NotFound):
|
||||
return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
|
||||
case err != nil:
|
||||
if err != nil {
|
||||
if errors.Is(err, protoregistry.NotFound) {
|
||||
return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
|
||||
}
|
||||
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
|
||||
}
|
||||
// Look for enum by name
|
||||
@@ -189,8 +193,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
|
||||
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
|
||||
}
|
||||
// Look for enum by number
|
||||
v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
|
||||
if v == nil {
|
||||
if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
|
||||
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
|
||||
}
|
||||
}
|
||||
@@ -234,7 +237,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
|
||||
case protoreflect.StringKind:
|
||||
return protoreflect.ValueOfString(value), nil
|
||||
case protoreflect.BytesKind:
|
||||
v, err := base64.URLEncoding.DecodeString(value)
|
||||
v, err := Bytes(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
@@ -250,18 +253,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
|
||||
var msg proto.Message
|
||||
switch msgDescriptor.FullName() {
|
||||
case "google.protobuf.Timestamp":
|
||||
if value == "null" {
|
||||
break
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = timestamppb.New(t)
|
||||
case "google.protobuf.Duration":
|
||||
if value == "null" {
|
||||
break
|
||||
}
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
@@ -272,55 +269,67 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.DoubleValue{Value: v}
|
||||
msg = wrapperspb.Double(v)
|
||||
case "google.protobuf.FloatValue":
|
||||
v, err := strconv.ParseFloat(value, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.FloatValue{Value: float32(v)}
|
||||
msg = wrapperspb.Float(float32(v))
|
||||
case "google.protobuf.Int64Value":
|
||||
v, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.Int64Value{Value: v}
|
||||
msg = wrapperspb.Int64(v)
|
||||
case "google.protobuf.Int32Value":
|
||||
v, err := strconv.ParseInt(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.Int32Value{Value: int32(v)}
|
||||
msg = wrapperspb.Int32(int32(v))
|
||||
case "google.protobuf.UInt64Value":
|
||||
v, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.UInt64Value{Value: v}
|
||||
msg = wrapperspb.UInt64(v)
|
||||
case "google.protobuf.UInt32Value":
|
||||
v, err := strconv.ParseUint(value, 10, 32)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.UInt32Value{Value: uint32(v)}
|
||||
msg = wrapperspb.UInt32(uint32(v))
|
||||
case "google.protobuf.BoolValue":
|
||||
v, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.BoolValue{Value: v}
|
||||
msg = wrapperspb.Bool(v)
|
||||
case "google.protobuf.StringValue":
|
||||
msg = &wrapperspb.StringValue{Value: value}
|
||||
msg = wrapperspb.String(value)
|
||||
case "google.protobuf.BytesValue":
|
||||
v, err := base64.URLEncoding.DecodeString(value)
|
||||
v, err := Bytes(value)
|
||||
if err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &wrapperspb.BytesValue{Value: v}
|
||||
msg = wrapperspb.Bytes(v)
|
||||
case "google.protobuf.FieldMask":
|
||||
fm := &field_mask.FieldMask{}
|
||||
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
|
||||
msg = fm
|
||||
case "google.protobuf.Value":
|
||||
var v structpb.Value
|
||||
if err := protojson.Unmarshal([]byte(value), &v); err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &v
|
||||
case "google.protobuf.Struct":
|
||||
var v structpb.Struct
|
||||
if err := protojson.Unmarshal([]byte(value), &v); err != nil {
|
||||
return protoreflect.Value{}, err
|
||||
}
|
||||
msg = &v
|
||||
default:
|
||||
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
|
||||
}
|
||||
|
||||
6 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel (generated, vendored)
@@ -8,6 +8,7 @@ go_library(
"doc.go",
"pattern.go",
"readerfactory.go",
"string_array_flag.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
@@ -16,7 +17,10 @@ go_library(
go_test(
name = "utilities_test",
size = "small",
srcs = ["trie_test.go"],
srcs = [
"string_array_flag_test.go",
"trie_test.go",
],
deps = [":utilities"],
)
3 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go (generated, vendored)
@@ -3,13 +3,12 @@ package utilities
import (
"bytes"
"io"
"io/ioutil"
)

// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
33 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go (generated, vendored, new file)
@@ -0,0 +1,33 @@
package utilities

import (
"flag"
"strings"
)

// flagInterface is an cut down interface to `flag`
type flagInterface interface {
Var(value flag.Value, name string, usage string)
}

// StringArrayFlag defines a flag with the specified name and usage string.
// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
value := &StringArrayFlags{}
f.Var(value, name, usage)
return value
}

// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
type StringArrayFlags []string

// String returns a string representation of `StringArrayFlags`
func (i *StringArrayFlags) String() string {
return strings.Join(*i, ",")
}

// Set appends a value to `StringArrayFlags`
func (i *StringArrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
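A minimal sketch of how this new helper could be consumed with the standard `flag` package; the `--header` flag name and the sample invocation are illustrative only:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Declare a repeatable string flag backed by StringArrayFlags.
	headers := utilities.StringArrayFlag(flag.CommandLine, "header", "header to forward; may be repeated")
	flag.Parse()

	// e.g. `prog --header X-A=1 --header X-B=2` yields [X-A=1 X-B=2].
	fmt.Println([]string(*headers))
}
```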
2 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go (generated, vendored)
@@ -40,7 +40,7 @@ func NewDoubleArray(seqs [][]string) *DoubleArray {
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
var result [][]int
for _, seq := range seqs {
var encoded []int
encoded := make([]int, 0, len(seq))
for _, token := range seq {
if _, ok := da.Encoding[token]; !ok {
da.Encoding[token] = len(da.Encoding)
8 vendor/github.com/spf13/cobra/.golangci.yml (generated, vendored)
@@ -19,7 +19,7 @@ linters:
disable-all: true
enable:
#- bodyclose
- deadcode
# - deadcode ! deprecated since v1.49.0; replaced by 'unused'
#- depguard
#- dogsled
#- dupl
@@ -51,12 +51,12 @@ linters:
#- rowserrcheck
#- scopelint
#- staticcheck
- structcheck
#- structcheck ! deprecated since v1.49.0; replaced by 'unused'
#- stylecheck
#- typecheck
- unconvert
#- unparam
#- unused
- varcheck
- unused
# - varcheck ! deprecated since v1.49.0; replaced by 'unused'
#- whitespace
fast: false
8 vendor/github.com/spf13/cobra/README.md (generated, vendored)
@@ -4,7 +4,7 @@ Cobra is a library for creating powerful modern CLI applications.
|
||||
|
||||
Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
|
||||
[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
|
||||
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
|
||||
name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
|
||||
|
||||
[](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
|
||||
[](https://pkg.go.dev/github.com/spf13/cobra)
|
||||
@@ -80,7 +80,7 @@ which maintains the same interface while adding POSIX compliance.
|
||||
|
||||
# Installing
|
||||
Using Cobra is easy. First, use `go get` to install the latest version
|
||||
of the library.
|
||||
of the library.
|
||||
|
||||
```
|
||||
go get -u github.com/spf13/cobra@latest
|
||||
@@ -105,8 +105,8 @@ go install github.com/spf13/cobra-cli@latest
|
||||
|
||||
For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
|
||||
|
||||
For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md).
|
||||
For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md).
|
||||
|
||||
# License
|
||||
|
||||
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
|
||||
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt)
|
||||
|
||||
10 vendor/github.com/spf13/cobra/active_help.go (generated, vendored)
@@ -17,6 +17,7 @@ package cobra
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -29,6 +30,8 @@ const (
|
||||
activeHelpGlobalDisable = "0"
|
||||
)
|
||||
|
||||
var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
|
||||
|
||||
// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
|
||||
// Such strings will be processed by the completion script and will be shown as ActiveHelp
|
||||
// to the user.
|
||||
@@ -42,7 +45,7 @@ func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
|
||||
|
||||
// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
|
||||
// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
|
||||
// case, with all - replaced by _.
|
||||
// case, with all non-ASCII-alphanumeric characters replaced by `_`.
|
||||
// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
|
||||
// is set to "0".
|
||||
func GetActiveHelpConfig(cmd *Command) string {
|
||||
@@ -55,9 +58,10 @@ func GetActiveHelpConfig(cmd *Command) string {
|
||||
|
||||
// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
|
||||
// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
|
||||
// root command in upper case, with all - replaced by _.
|
||||
// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
|
||||
func activeHelpEnvVar(name string) string {
|
||||
// This format should not be changed: users will be using it explicitly.
|
||||
activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix))
|
||||
return strings.ReplaceAll(activeHelpEnvVar, "-", "_")
|
||||
activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_")
|
||||
return activeHelpEnvVar
|
||||
}
|
||||
|
||||
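To illustrate the widened substitution rule above, a small stand-alone sketch that mirrors the documented naming: upper-case the program name, append `_ACTIVE_HELP`, and replace anything outside `[A-Z0-9_]` with `_`. The helper name and sample program names are hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nonAlnum = regexp.MustCompile(`[^A-Z0-9_]`)

// activeHelpVarName mirrors the documented rule for the program-specific
// ActiveHelp environment variable name.
func activeHelpVarName(program string) string {
	return nonAlnum.ReplaceAllString(strings.ToUpper(program+"_ACTIVE_HELP"), "_")
}

func main() {
	fmt.Println(activeHelpVarName("helm"))        // HELM_ACTIVE_HELP
	fmt.Println(activeHelpVarName("my.tool-cli")) // MY_TOOL_CLI_ACTIVE_HELP
}
```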
157 vendor/github.com/spf13/cobra/active_help.md (generated, vendored)
@@ -1,157 +0,0 @@
|
||||
# Active Help
|
||||
|
||||
Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion.
|
||||
|
||||
For example,
|
||||
```
|
||||
bash-5.1$ helm repo add [tab]
|
||||
You must choose a name for the repo you are adding.
|
||||
|
||||
bash-5.1$ bin/helm package [tab]
|
||||
Please specify the path to the chart to package
|
||||
|
||||
bash-5.1$ bin/helm package [tab][tab]
|
||||
bin/ internal/ scripts/ pkg/ testdata/
|
||||
```
|
||||
|
||||
**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program.
|
||||
## Supported shells
|
||||
|
||||
Active Help is currently only supported for the following shells:
|
||||
- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed.
|
||||
- Zsh
|
||||
|
||||
## Adding Active Help messages
|
||||
|
||||
As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md).
|
||||
|
||||
Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details.
|
||||
|
||||
### Active Help for nouns
|
||||
|
||||
Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example:
|
||||
|
||||
```go
|
||||
cmd := &cobra.Command{
|
||||
Use: "add [NAME] [URL]",
|
||||
Short: "add a chart repository",
|
||||
Args: require.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return addRepo(args)
|
||||
},
|
||||
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
var comps []string
|
||||
if len(args) == 0 {
|
||||
comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
|
||||
} else if len(args) == 1 {
|
||||
comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
|
||||
} else {
|
||||
comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
|
||||
}
|
||||
return comps, cobra.ShellCompDirectiveNoFileComp
|
||||
},
|
||||
}
|
||||
```
|
||||
The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior:
|
||||
```
|
||||
bash-5.1$ helm repo add [tab]
|
||||
You must choose a name for the repo you are adding
|
||||
|
||||
bash-5.1$ helm repo add grafana [tab]
|
||||
You must specify the URL for the repo you are adding
|
||||
|
||||
bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab]
|
||||
This command does not take any more arguments
|
||||
```
|
||||
**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions.
|
||||
|
||||
### Active Help for flags
|
||||
|
||||
Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example:
|
||||
```go
|
||||
_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
if len(args) != 2 {
|
||||
return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
return compVersionFlag(args[1], toComplete)
|
||||
})
|
||||
```
|
||||
The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag.
|
||||
```
|
||||
bash-5.1$ bin/helm install myrelease --version 2.0.[tab]
|
||||
You must first specify the chart to install before the --version flag can be completed
|
||||
|
||||
bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab]
|
||||
2.0.1 2.0.2 2.0.3
|
||||
```
|
||||
|
||||
## User control of Active Help
|
||||
|
||||
You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any.
|
||||
Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration.
|
||||
|
||||
The way to configure Active Help is to use the program's Active Help environment
|
||||
variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your
|
||||
program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever
|
||||
Active Help configuration values are supported by the program.
|
||||
|
||||
For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user
|
||||
would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell.
|
||||
|
||||
For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the
|
||||
Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages
|
||||
should or should not be added (instead of reading the environment variable directly).
|
||||
|
||||
For example:
|
||||
```go
|
||||
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
activeHelpLevel := cobra.GetActiveHelpConfig(cmd)
|
||||
|
||||
var comps []string
|
||||
if len(args) == 0 {
|
||||
if activeHelpLevel != "off" {
|
||||
comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
|
||||
}
|
||||
} else if len(args) == 1 {
|
||||
if activeHelpLevel != "off" {
|
||||
comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
|
||||
}
|
||||
} else {
|
||||
if activeHelpLevel == "local" {
|
||||
comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
|
||||
}
|
||||
}
|
||||
return comps, cobra.ShellCompDirectiveNoFileComp
|
||||
},
|
||||
```
|
||||
**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly.
|
||||
|
||||
**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to.
|
||||
|
||||
**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string.
|
||||
## Active Help with Cobra's default completion command
|
||||
|
||||
Cobra provides a default `completion` command for programs that wish to use it.
|
||||
When using the default `completion` command, Active Help is configurable in the same
|
||||
fashion as described above using environment variables. You may wish to document this in more
|
||||
details for your users.
|
||||
|
||||
## Debugging Active Help
|
||||
|
||||
Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details.
|
||||
|
||||
When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where any `-` is replaced by an `_`. For example, we can test deactivating some Active Help as shown below:
|
||||
```
|
||||
$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h<ENTER>
|
||||
bitnami/haproxy
|
||||
bitnami/harbor
|
||||
_activeHelp_ WARNING: cannot re-use a name that is still in use
|
||||
:0
|
||||
Completion ended with directive: ShellCompDirectiveDefault
|
||||
|
||||
$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h<ENTER>
|
||||
bitnami/haproxy
|
||||
bitnami/harbor
|
||||
:0
|
||||
Completion ended with directive: ShellCompDirectiveDefault
|
||||
```
|
||||
2 vendor/github.com/spf13/cobra/bash_completions.go (generated, vendored)
@@ -85,7 +85,7 @@ __%[1]s_handle_go_custom_completion()
local out requestComp lastParam lastChar comp directive args

# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows to handle aliases
# Calling ${words[0]} instead of directly %[1]s allows handling aliases
args=("${words[@]:1}")
# Disable ActiveHelp which is not supported for bash completion v1
requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
93 vendor/github.com/spf13/cobra/bash_completions.md (generated, vendored)
@@ -1,93 +0,0 @@
|
||||
# Generating Bash Completions For Your cobra.Command
|
||||
|
||||
Please refer to [Shell Completions](shell_completions.md) for details.
|
||||
|
||||
## Bash legacy dynamic completions
|
||||
|
||||
For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
|
||||
|
||||
**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own.
|
||||
|
||||
The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
|
||||
|
||||
Some code that works in kubernetes:
|
||||
|
||||
```bash
|
||||
const (
|
||||
bash_completion_func = `__kubectl_parse_get()
|
||||
{
|
||||
local kubectl_output out
|
||||
if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
|
||||
out=($(echo "${kubectl_output}" | awk '{print $1}'))
|
||||
COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
|
||||
fi
|
||||
}
|
||||
|
||||
__kubectl_get_resource()
|
||||
{
|
||||
if [[ ${#nouns[@]} -eq 0 ]]; then
|
||||
return 1
|
||||
fi
|
||||
__kubectl_parse_get ${nouns[${#nouns[@]} -1]}
|
||||
if [[ $? -eq 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
__kubectl_custom_func() {
|
||||
case ${last_command} in
|
||||
kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
|
||||
__kubectl_get_resource
|
||||
return
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
}
|
||||
`)
|
||||
```
|
||||
|
||||
And then I set that in my command definition:
|
||||
|
||||
```go
|
||||
cmds := &cobra.Command{
|
||||
Use: "kubectl",
|
||||
Short: "kubectl controls the Kubernetes cluster manager",
|
||||
Long: `kubectl controls the Kubernetes cluster manager.
|
||||
|
||||
Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
|
||||
Run: runHelp,
|
||||
BashCompletionFunction: bash_completion_func,
|
||||
}
|
||||
```
|
||||
|
||||
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
|
||||
|
||||
Similarly, for flags:
|
||||
|
||||
```go
|
||||
annotation := make(map[string][]string)
|
||||
annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
|
||||
|
||||
flag := &pflag.Flag{
|
||||
Name: "namespace",
|
||||
Usage: usage,
|
||||
Annotations: annotation,
|
||||
}
|
||||
cmd.Flags().AddFlag(flag)
|
||||
```
|
||||
|
||||
In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
|
||||
value, e.g.:
|
||||
|
||||
```bash
|
||||
__kubectl_get_namespaces()
|
||||
{
|
||||
local template
|
||||
template="{{ range .items }}{{ .metadata.name }} {{ end }}"
|
||||
local kubectl_out
|
||||
if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
|
||||
COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) )
|
||||
fi
|
||||
}
|
||||
```
|
||||
2 vendor/github.com/spf13/cobra/bash_completionsV2.go (generated, vendored)
@@ -57,7 +57,7 @@ __%[1]s_get_completion_results() {
local requestComp lastParam lastChar args

# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows to handle aliases
# Calling ${words[0]} instead of directly %[1]s allows handling aliases
args=("${words[@]:1}")
requestComp="${words[0]} %[2]s ${args[*]}"
13 vendor/github.com/spf13/cobra/cobra.go (generated, vendored)
@@ -43,12 +43,13 @@ var initializers []func()
var finalizers []func()

const (
defaultPrefixMatching = false
defaultCommandSorting = true
defaultCaseInsensitive = false
defaultPrefixMatching = false
defaultCommandSorting = true
defaultCaseInsensitive = false
defaultTraverseRunHooks = false
)

// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing
// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to automatically enable in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = defaultPrefixMatching
@@ -60,6 +61,10 @@ var EnableCommandSorting = defaultCommandSorting
// EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default)
var EnableCaseInsensitive = defaultCaseInsensitive

// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents.
// By default this is disabled, which means only the first run hook to be found is executed.
var EnableTraverseRunHooks = defaultTraverseRunHooks

// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
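A sketch of the new traverse-run-hooks behavior; the command names and printed strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// With traversal enabled, persistent hooks of every parent run,
	// not only those of the nearest command that defines one.
	cobra.EnableTraverseRunHooks = true

	root := &cobra.Command{
		Use:              "root",
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("root pre-run") },
	}
	child := &cobra.Command{
		Use:              "child",
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("child pre-run") },
		Run:              func(cmd *cobra.Command, args []string) {},
	}
	root.AddCommand(child)
	root.SetArgs([]string{"child"})

	_ = root.Execute() // prints "root pre-run" then "child pre-run"
}
```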
69 vendor/github.com/spf13/cobra/command.go (generated, vendored)
@@ -30,7 +30,10 @@ import (
|
||||
flag "github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
|
||||
const (
|
||||
FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
|
||||
CommandDisplayNameAnnotation = "cobra_annotation_command_display_name"
|
||||
)
|
||||
|
||||
// FParseErrWhitelist configures Flag parse errors to be ignored
|
||||
type FParseErrWhitelist flag.ParseErrorsWhitelist
|
||||
@@ -99,7 +102,7 @@ type Command struct {
|
||||
Deprecated string
|
||||
|
||||
// Annotations are key/value pairs that can be used by applications to identify or
|
||||
// group commands.
|
||||
// group commands or set special options.
|
||||
Annotations map[string]string
|
||||
|
||||
// Version defines the version for this command. If this value is non-empty and the command does not
|
||||
@@ -115,6 +118,8 @@ type Command struct {
|
||||
// * PostRun()
|
||||
// * PersistentPostRun()
|
||||
// All functions get the same args, the arguments after the command name.
|
||||
// The *PreRun and *PostRun functions will only be executed if the Run function of the current
|
||||
// command has been declared.
|
||||
//
|
||||
// PersistentPreRun: children of this command will inherit and execute.
|
||||
PersistentPreRun func(cmd *Command, args []string)
|
||||
@@ -181,6 +186,9 @@ type Command struct {
|
||||
// versionTemplate is the version template defined by user.
|
||||
versionTemplate string
|
||||
|
||||
// errPrefix is the error message prefix defined by user.
|
||||
errPrefix string
|
||||
|
||||
// inReader is a reader defined by the user that replaces stdin
|
||||
inReader io.Reader
|
||||
// outWriter is a writer defined by the user that replaces stdout
|
||||
@@ -346,6 +354,11 @@ func (c *Command) SetVersionTemplate(s string) {
|
||||
c.versionTemplate = s
|
||||
}
|
||||
|
||||
// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
|
||||
func (c *Command) SetErrPrefix(s string) {
|
||||
c.errPrefix = s
|
||||
}
|
||||
|
||||
// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
|
||||
// The user should not have a cyclic dependency on commands.
|
||||
func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
|
||||
@@ -595,6 +608,18 @@ func (c *Command) VersionTemplate() string {
|
||||
`
|
||||
}
|
||||
|
||||
// ErrPrefix return error message prefix for the command
|
||||
func (c *Command) ErrPrefix() string {
|
||||
if c.errPrefix != "" {
|
||||
return c.errPrefix
|
||||
}
|
||||
|
||||
if c.HasParent() {
|
||||
return c.parent.ErrPrefix()
|
||||
}
|
||||
return "Error:"
|
||||
}
|
||||
|
||||
func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
|
||||
flag := fs.Lookup(name)
|
||||
if flag == nil {
|
||||
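A short sketch of the error-prefix hooks added above; the command name and error text are illustrative:

```go
package main

import (
	"errors"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "frob",
		RunE: func(cmd *cobra.Command, args []string) error {
			return errors.New("disk not found")
		},
	}
	// Replace the default "Error:" prefix used when printing failures.
	cmd.SetErrPrefix("frob failed:")

	_ = cmd.Execute() // stderr: "frob failed: disk not found" plus a usage hint
}
```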
@@ -752,7 +777,9 @@ func (c *Command) findNext(next string) *Command {
|
||||
}
|
||||
|
||||
if len(matches) == 1 {
|
||||
return matches[0]
|
||||
// Temporarily disable gosec G602, which produces a false positive.
|
||||
// See https://github.com/securego/gosec/issues/1005.
|
||||
return matches[0] // #nosec G602
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -910,15 +937,31 @@ func (c *Command) execute(a []string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
parents := make([]*Command, 0, 5)
|
||||
for p := c; p != nil; p = p.Parent() {
|
||||
if EnableTraverseRunHooks {
|
||||
// When EnableTraverseRunHooks is set:
|
||||
// - Execute all persistent pre-runs from the root parent till this command.
|
||||
// - Execute all persistent post-runs from this command till the root parent.
|
||||
parents = append([]*Command{p}, parents...)
|
||||
} else {
|
||||
// Otherwise, execute only the first found persistent hook.
|
||||
parents = append(parents, p)
|
||||
}
|
||||
}
|
||||
for _, p := range parents {
|
||||
if p.PersistentPreRunE != nil {
|
||||
if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
if !EnableTraverseRunHooks {
|
||||
break
|
||||
}
|
||||
} else if p.PersistentPreRun != nil {
|
||||
p.PersistentPreRun(c, argWoFlags)
|
||||
break
|
||||
if !EnableTraverseRunHooks {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.PreRunE != nil {
|
||||
@@ -955,10 +998,14 @@ func (c *Command) execute(a []string) (err error) {
|
||||
if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
if !EnableTraverseRunHooks {
|
||||
break
|
||||
}
|
||||
} else if p.PersistentPostRun != nil {
|
||||
p.PersistentPostRun(c, argWoFlags)
|
||||
break
|
||||
if !EnableTraverseRunHooks {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1048,7 +1095,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
|
||||
c = cmd
|
||||
}
|
||||
if !c.SilenceErrors {
|
||||
c.PrintErrln("Error:", err.Error())
|
||||
c.PrintErrln(c.ErrPrefix(), err.Error())
|
||||
c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
|
||||
}
|
||||
return c, err
|
||||
@@ -1077,7 +1124,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
|
||||
// If root command has SilenceErrors flagged,
|
||||
// all subcommands should respect it
|
||||
if !cmd.SilenceErrors && !c.SilenceErrors {
|
||||
c.PrintErrln("Error:", err.Error())
|
||||
c.PrintErrln(cmd.ErrPrefix(), err.Error())
|
||||
}
|
||||
|
||||
// If root command has SilenceUsage flagged,
|
||||
@@ -1380,6 +1427,9 @@ func (c *Command) CommandPath() string {
|
||||
if c.HasParent() {
|
||||
return c.Parent().CommandPath() + " " + c.Name()
|
||||
}
|
||||
if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok {
|
||||
return displayName
|
||||
}
|
||||
return c.Name()
|
||||
}
|
||||
|
||||
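A sketch of the display-name annotation consulted above; "kubectl frob" is a made-up display name for a plugin-style binary:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Present the root command under a different name in help and error output,
	// e.g. when the binary is installed as a plugin.
	cmd := &cobra.Command{
		Use: "frob",
		Annotations: map[string]string{
			cobra.CommandDisplayNameAnnotation: "kubectl frob",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	fmt.Println(cmd.CommandPath()) // kubectl frob
}
```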
@@ -1402,6 +1452,7 @@ func (c *Command) UseLine() string {
|
||||
|
||||
// DebugFlags used to determine which flags have been assigned to which commands
|
||||
// and which persist.
|
||||
// nolint:goconst
|
||||
func (c *Command) DebugFlags() {
|
||||
c.Println("DebugFlags called on", c.Name())
|
||||
var debugflags func(*Command)
|
||||
|
||||
29 vendor/github.com/spf13/cobra/completions.go (generated, vendored)
@@ -145,6 +145,20 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
|
||||
func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) {
|
||||
flag := c.Flag(flagName)
|
||||
if flag == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
flagCompletionMutex.RLock()
|
||||
defer flagCompletionMutex.RUnlock()
|
||||
|
||||
completionFunc, exists := flagCompletionFunctions[flag]
|
||||
return completionFunc, exists
|
||||
}
|
||||
|
||||
// Returns a string listing the different directive enabled in the specified parameter
|
||||
func (d ShellCompDirective) string() string {
|
||||
var directives []string
|
||||
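A usage sketch for the new lookup: register a completion function for a flag, then retrieve it again through `GetFlagCompletionFunc`. The flag name and completion values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "serve", Run: func(cmd *cobra.Command, args []string) {}}
	cmd.Flags().String("format", "json", "output format")

	_ = cmd.RegisterFlagCompletionFunc("format", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		return []string{"json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
	})

	// The registered function can now be looked up by flag name.
	if fn, ok := cmd.GetFlagCompletionFunc("format"); ok {
		comps, _ := fn(cmd, nil, "")
		fmt.Println(comps) // [json yaml]
	}
}
```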
@@ -283,9 +297,13 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
|
||||
|
||||
// These flags are normally added when `execute()` is called on `finalCmd`,
|
||||
// however, when doing completion, we don't call `finalCmd.execute()`.
|
||||
// Let's add the --help and --version flag ourselves.
|
||||
finalCmd.InitDefaultHelpFlag()
|
||||
finalCmd.InitDefaultVersionFlag()
|
||||
// Let's add the --help and --version flag ourselves but only if the finalCmd
|
||||
// has not disabled flag parsing; if flag parsing is disabled, it is up to the
|
||||
// finalCmd itself to handle the completion of *all* flags.
|
||||
if !finalCmd.DisableFlagParsing {
|
||||
finalCmd.InitDefaultHelpFlag()
|
||||
finalCmd.InitDefaultVersionFlag()
|
||||
}
|
||||
|
||||
// Check if we are doing flag value completion before parsing the flags.
|
||||
// This is important because if we are completing a flag value, we need to also
|
||||
@@ -389,6 +407,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
|
||||
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||
doCompleteFlags(flag)
|
||||
})
|
||||
// Try to complete non-inherited flags even if DisableFlagParsing==true.
|
||||
// This allows programs to tell Cobra about flags for completion even
|
||||
// if the actual parsing of flags is not done by Cobra.
|
||||
// For instance, Helm uses this to provide flag name completion for
|
||||
// some of its plugins.
|
||||
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||
doCompleteFlags(flag)
|
||||
})
|
||||
|
||||
17 vendor/github.com/spf13/cobra/doc/README.md (generated, vendored)
@@ -1,17 +0,0 @@
# Documentation generation

- [Man page docs](./man_docs.md)
- [Markdown docs](./md_docs.md)
- [Rest docs](./rest_docs.md)
- [Yaml docs](./yaml_docs.md)

## Options
### `DisableAutoGenTag`

You may set `cmd.DisableAutoGenTag = true`
to _entirely_ remove the auto generated string "Auto generated by spf13/cobra..."
from any documentation source.

### `InitDefaultCompletionCmd`

You may call `cmd.InitDefaultCompletionCmd()` to document the default autocompletion command.
31 vendor/github.com/spf13/cobra/doc/man_docs.md (generated, vendored)
@@ -1,31 +0,0 @@
|
||||
# Generating Man Pages For Your Own cobra.Command
|
||||
|
||||
Generating man pages from a cobra command is incredibly easy. An example is as follows:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cmd := &cobra.Command{
|
||||
Use: "test",
|
||||
Short: "my test program",
|
||||
}
|
||||
header := &doc.GenManHeader{
|
||||
Title: "MINE",
|
||||
Section: "3",
|
||||
}
|
||||
err := doc.GenManTree(cmd, header, "/tmp")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
That will get you a man page `/tmp/test.3`
|
||||
8 vendor/github.com/spf13/cobra/doc/md_docs.go (generated, vendored)
@@ -27,6 +27,8 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const markdownExtension = ".md"
|
||||
|
||||
func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
|
||||
flags := cmd.NonInheritedFlags()
|
||||
flags.SetOutput(buf)
|
||||
@@ -83,7 +85,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string)
|
||||
if cmd.HasParent() {
|
||||
parent := cmd.Parent()
|
||||
pname := parent.CommandPath()
|
||||
link := pname + ".md"
|
||||
link := pname + markdownExtension
|
||||
link = strings.ReplaceAll(link, " ", "_")
|
||||
buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short))
|
||||
cmd.VisitParents(func(c *cobra.Command) {
|
||||
@@ -101,7 +103,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string)
|
||||
continue
|
||||
}
|
||||
cname := name + " " + child.Name()
|
||||
link := cname + ".md"
|
||||
link := cname + markdownExtension
|
||||
link = strings.ReplaceAll(link, " ", "_")
|
||||
buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short))
|
||||
}
|
||||
@@ -138,7 +140,7 @@ func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHa
|
||||
}
|
||||
}
|
||||
|
||||
basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".md"
|
||||
basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + markdownExtension
|
||||
filename := filepath.Join(dir, basename)
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
|
||||
115
vendor/github.com/spf13/cobra/doc/md_docs.md
generated
vendored
@@ -1,115 +0,0 @@
# Generating Markdown Docs For Your Own cobra.Command

Generating Markdown pages from a cobra command is incredibly easy. An example is as follows:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	cmd := &cobra.Command{
		Use:   "test",
		Short: "my test program",
	}
	err := doc.GenMarkdownTree(cmd, "/tmp")
	if err != nil {
		log.Fatal(err)
	}
}
```

That will get you a Markdown document `/tmp/test.md`

## Generate markdown docs for the entire command tree

This program can actually generate docs for the kubectl command in the kubernetes project

```go
package main

import (
	"log"
	"io/ioutil"
	"os"

	"k8s.io/kubernetes/pkg/kubectl/cmd"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

	"github.com/spf13/cobra/doc"
)

func main() {
	kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
	err := doc.GenMarkdownTree(kubectl, "./")
	if err != nil {
		log.Fatal(err)
	}
}
```

This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")

## Generate markdown docs for a single command

You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree`

```go
out := new(bytes.Buffer)
err := doc.GenMarkdown(cmd, out)
if err != nil {
	log.Fatal(err)
}
```

This will write the markdown doc for ONLY "cmd" into the out, buffer.

## Customize the output

Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output:

```go
func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
	//...
}
```

```go
func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
	//...
}
```

The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):

```go
const fmTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`

filePrepender := func(filename string) string {
	now := time.Now().Format(time.RFC3339)
	name := filepath.Base(filename)
	base := strings.TrimSuffix(name, path.Ext(name))
	url := "/commands/" + strings.ToLower(base) + "/"
	return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
```

The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:

```go
linkHandler := func(name string) string {
	base := strings.TrimSuffix(name, path.Ext(name))
	return "/commands/" + strings.ToLower(base) + "/"
}
```
114
vendor/github.com/spf13/cobra/doc/rest_docs.md
generated
vendored
@@ -1,114 +0,0 @@
# Generating ReStructured Text Docs For Your Own cobra.Command

Generating ReST pages from a cobra command is incredibly easy. An example is as follows:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	cmd := &cobra.Command{
		Use:   "test",
		Short: "my test program",
	}
	err := doc.GenReSTTree(cmd, "/tmp")
	if err != nil {
		log.Fatal(err)
	}
}
```

That will get you a ReST document `/tmp/test.rst`

## Generate ReST docs for the entire command tree

This program can actually generate docs for the kubectl command in the kubernetes project

```go
package main

import (
	"log"
	"io/ioutil"
	"os"

	"k8s.io/kubernetes/pkg/kubectl/cmd"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

	"github.com/spf13/cobra/doc"
)

func main() {
	kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
	err := doc.GenReSTTree(kubectl, "./")
	if err != nil {
		log.Fatal(err)
	}
}
```

This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")

## Generate ReST docs for a single command

You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree`

```go
out := new(bytes.Buffer)
err := doc.GenReST(cmd, out)
if err != nil {
	log.Fatal(err)
}
```

This will write the ReST doc for ONLY "cmd" into the out, buffer.

## Customize the output

Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output:

```go
func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
	//...
}
```

```go
func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error {
	//...
}
```

The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):

```go
const fmTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`
filePrepender := func(filename string) string {
	now := time.Now().Format(time.RFC3339)
	name := filepath.Base(filename)
	base := strings.TrimSuffix(name, path.Ext(name))
	url := "/commands/" + strings.ToLower(base) + "/"
	return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
```

The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used:

```go
// Sphinx cross-referencing format
linkHandler := func(name, ref string) string {
	return fmt.Sprintf(":ref:`%s <%s>`", name, ref)
}
```
112
vendor/github.com/spf13/cobra/doc/yaml_docs.md
generated
vendored
@@ -1,112 +0,0 @@
# Generating Yaml Docs For Your Own cobra.Command

Generating yaml files from a cobra command is incredibly easy. An example is as follows:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	cmd := &cobra.Command{
		Use:   "test",
		Short: "my test program",
	}
	err := doc.GenYamlTree(cmd, "/tmp")
	if err != nil {
		log.Fatal(err)
	}
}
```

That will get you a Yaml document `/tmp/test.yaml`

## Generate yaml docs for the entire command tree

This program can actually generate docs for the kubectl command in the kubernetes project

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"k8s.io/kubernetes/pkg/kubectl/cmd"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

	"github.com/spf13/cobra/doc"
)

func main() {
	kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
	err := doc.GenYamlTree(kubectl, "./")
	if err != nil {
		log.Fatal(err)
	}
}
```

This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")

## Generate yaml docs for a single command

You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree`

```go
out := new(bytes.Buffer)
doc.GenYaml(cmd, out)
```

This will write the yaml doc for ONLY "cmd" into the out, buffer.

## Customize the output

Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output:

```go
func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
	//...
}
```

```go
func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
	//...
}
```

The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):

```go
const fmTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`

filePrepender := func(filename string) string {
	now := time.Now().Format(time.RFC3339)
	name := filepath.Base(filename)
	base := strings.TrimSuffix(name, path.Ext(name))
	url := "/commands/" + strings.ToLower(base) + "/"
	return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
```

The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:

```go
linkHandler := func(name string) string {
	base := strings.TrimSuffix(name, path.Ext(name))
	return "/commands/" + strings.ToLower(base) + "/"
}
```
2
vendor/github.com/spf13/cobra/fish_completions.go
generated
vendored
@@ -113,7 +113,7 @@ function __%[1]s_clear_perform_completion_once_result
__%[1]s_debug ""
__%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
set --erase __%[1]s_perform_completion_once_result
__%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result"
__%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result"
end

function __%[1]s_requires_order_preservation
4
vendor/github.com/spf13/cobra/fish_completions.md
generated
vendored
@@ -1,4 +0,0 @@
## Generating Fish Completions For Your cobra.Command

Please refer to [Shell Completions](shell_completions.md) for details.
68
vendor/github.com/spf13/cobra/flag_groups.go
generated
vendored
@@ -24,6 +24,7 @@ import (

const (
requiredAsGroup = "cobra_annotation_required_if_others_set"
oneRequired = "cobra_annotation_one_required"
mutuallyExclusive = "cobra_annotation_mutually_exclusive"
)

@@ -43,6 +44,22 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
}
}

// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
// if the command is invoked without at least one flag from the given set of flags.
func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
c.mergePersistentFlags()
for _, v := range flagNames {
f := c.Flags().Lookup(v)
if f == nil {
panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
}
if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil {
// Only errs if the flag isn't found.
panic(err)
}
}
}

// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
// if the command is invoked with more than one flag from the given set of flags.
func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
@@ -59,7 +76,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
}
}

// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the
// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
// first error encountered.
func (c *Command) ValidateFlagGroups() error {
if c.DisableFlagParsing {
@@ -71,15 +88,20 @@ func (c *Command) ValidateFlagGroups() error {
// groupStatus format is the list of flags as a unique ID,
// then a map of each flag name and whether it is set or not.
groupStatus := map[string]map[string]bool{}
oneRequiredGroupStatus := map[string]map[string]bool{}
mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
flags.VisitAll(func(pflag *flag.Flag) {
processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus)
processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
})

if err := validateRequiredFlagGroups(groupStatus); err != nil {
return err
}
if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil {
return err
}
if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
return err
}
@@ -142,6 +164,27 @@ func validateRequiredFlagGroups(data map[string]map[string]bool) error {
return nil
}

func validateOneRequiredFlagGroups(data map[string]map[string]bool) error {
keys := sortedKeys(data)
for _, flagList := range keys {
flagnameAndStatus := data[flagList]
var set []string
for flagname, isSet := range flagnameAndStatus {
if isSet {
set = append(set, flagname)
}
}
if len(set) >= 1 {
continue
}

// Sort values, so they can be tested/scripted against consistently.
sort.Strings(set)
return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList)
}
return nil
}

func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
keys := sortedKeys(data)
for _, flagList := range keys {
@@ -176,6 +219,7 @@ func sortedKeys(m map[string]map[string]bool) []string {

// enforceFlagGroupsForCompletion will do the following:
// - when a flag in a group is present, other flags in the group will be marked required
// - when none of the flags in a one-required group are present, all flags in the group will be marked required
// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
// This allows the standard completion logic to behave appropriately for flag groups
func (c *Command) enforceFlagGroupsForCompletion() {
@@ -185,9 +229,11 @@ func (c *Command) enforceFlagGroupsForCompletion() {

flags := c.Flags()
groupStatus := map[string]map[string]bool{}
oneRequiredGroupStatus := map[string]map[string]bool{}
mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
c.Flags().VisitAll(func(pflag *flag.Flag) {
processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus)
processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
})

@@ -204,6 +250,26 @@ func (c *Command) enforceFlagGroupsForCompletion() {
}
}

// If none of the flags of a one-required group are present, we make all the flags
// of that group required so that the shell completion suggests them automatically
for flagList, flagnameAndStatus := range oneRequiredGroupStatus {
set := 0

for _, isSet := range flagnameAndStatus {
if isSet {
set++
}
}

// None of the flags of the group are set, mark all flags in the group
// as required
if set == 0 {
for _, fName := range strings.Split(flagList, " ") {
_ = c.MarkFlagRequired(fName)
}
}
}

// If a flag that is mutually exclusive to others is present, we hide the other
// flags of that group so the shell completion does not suggest them
for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
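The `MarkFlagsOneRequired` additions above introduce a third kind of flag group alongside the existing required-together and mutually-exclusive groups. A minimal, hypothetical usage sketch (not code from this repository), assuming a command with `--file` and `--url` flags:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "export",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("exporting")
			return nil
		},
	}
	cmd.Flags().String("file", "", "write the export to a file")
	cmd.Flags().String("url", "", "send the export to a URL")

	// At least one of --file/--url must be given (the new oneRequired group)...
	cmd.MarkFlagsOneRequired("file", "url")
	// ...but not both at once (the existing mutuallyExclusive group).
	cmd.MarkFlagsMutuallyExclusive("file", "url")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

During shell completion, `enforceFlagGroupsForCompletion` (above) marks every flag of an unsatisfied one-required group as required, so those flags are suggested first.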
6
vendor/github.com/spf13/cobra/powershell_completions.go
generated
vendored
@@ -47,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
}

[scriptblock]$__%[2]sCompleterBlock = {
[scriptblock]${__%[2]sCompleterBlock} = {
param(
$WordToComplete,
$CommandAst,
@@ -122,7 +122,7 @@ filter __%[1]s_escapeStringWithSpecialChars {

__%[1]s_debug "Calling $RequestComp"
# First disable ActiveHelp which is not supported for Powershell
$env:%[10]s=0
${env:%[10]s}=0

#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
@@ -279,7 +279,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
}
}

Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock
Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock}
`, name, nameForVar, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
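The PowerShell hunks above switch the generated script to brace-delimited `${...}` variable references, which PowerShell accepts for any variable name, making the rendered script more robust across program names. For context, a hypothetical sketch (not code from this repository) of the call that renders this template for a command:

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command; its name feeds the %[1]s/%[2]s placeholders
	// in the PowerShell template shown above.
	root := &cobra.Command{Use: "mytool"}

	// Write the PowerShell completion script (with descriptions) to stdout.
	if err := root.GenPowerShellCompletionWithDesc(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```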
Some files were not shown because too many files have changed in this diff.