mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
[v0.34.0] bump to kubernetes 1.34 deps
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
This commit is contained in:
14
vendor/github.com/asaskevich/govalidator/.travis.yml
generated
vendored
14
vendor/github.com/asaskevich/govalidator/.travis.yml
generated
vendored
@@ -1,14 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
notifications:
|
||||
email:
|
||||
- bwatas@gmail.com
|
||||
63
vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
generated
vendored
63
vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
generated
vendored
@@ -1,63 +0,0 @@
|
||||
#### Support
|
||||
If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
|
||||
|
||||
#### What to contribute
|
||||
If you don't know what to do, there are some features and functions that need to be done
|
||||
|
||||
- [ ] Refactor code
|
||||
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
|
||||
- [ ] Create actual list of contributors and projects that currently using this package
|
||||
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
|
||||
- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
|
||||
- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
|
||||
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
|
||||
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
|
||||
- [ ] Implement fuzzing testing
|
||||
- [ ] Implement some struct/map/array utilities
|
||||
- [ ] Implement map/array validation
|
||||
- [ ] Implement benchmarking
|
||||
- [ ] Implement batch of examples
|
||||
- [ ] Look at forks for new features and fixes
|
||||
|
||||
#### Advice
|
||||
Feel free to create what you want, but keep in mind when you implement new features:
|
||||
- Code must be clear and readable, names of variables/constants clearly describes what they are doing
|
||||
- Public functions must be documented and described in source file and added to README.md to the list of available functions
|
||||
- There are must be unit-tests for any new functions and improvements
|
||||
|
||||
## Financial contributions
|
||||
|
||||
We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
|
||||
Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
|
||||
### Contributors
|
||||
|
||||
Thank you to all the people who have already contributed to govalidator!
|
||||
<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>
|
||||
|
||||
|
||||
### Backers
|
||||
|
||||
Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
|
||||
|
||||
<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>
|
||||
|
||||
|
||||
### Sponsors
|
||||
|
||||
Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
|
||||
|
||||
<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
|
||||
21
vendor/github.com/asaskevich/govalidator/LICENSE
generated
vendored
21
vendor/github.com/asaskevich/govalidator/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Alex Saskevich
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
507
vendor/github.com/asaskevich/govalidator/README.md
generated
vendored
507
vendor/github.com/asaskevich/govalidator/README.md
generated
vendored
@@ -1,507 +0,0 @@
|
||||
govalidator
|
||||
===========
|
||||
[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator) [](https://coveralls.io/r/asaskevich/govalidator?branch=master) [](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
|
||||
[](https://travis-ci.org/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
|
||||
|
||||
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
|
||||
|
||||
#### Installation
|
||||
Make sure that Go is installed on your computer.
|
||||
Type the following command in your terminal:
|
||||
|
||||
go get github.com/asaskevich/govalidator
|
||||
|
||||
or you can get specified release of the package with `gopkg.in`:
|
||||
|
||||
go get gopkg.in/asaskevich/govalidator.v4
|
||||
|
||||
After it the package is ready to use.
|
||||
|
||||
|
||||
#### Import package in your project
|
||||
Add following line in your `*.go` file:
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
```
|
||||
If you are unhappy to use long `govalidator`, you can do something like this:
|
||||
```go
|
||||
import (
|
||||
valid "github.com/asaskevich/govalidator"
|
||||
)
|
||||
```
|
||||
|
||||
#### Activate behavior to require all fields have a validation tag by default
|
||||
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
|
||||
|
||||
`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors.
|
||||
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
func init() {
|
||||
govalidator.SetFieldsRequiredByDefault(true)
|
||||
}
|
||||
```
|
||||
|
||||
Here's some code to explain it:
|
||||
```go
|
||||
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||
type exampleStruct struct {
|
||||
Name string ``
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// this, however, will only fail when Email is empty or an invalid email address:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email,optional"`
|
||||
}
|
||||
```
|
||||
|
||||
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
||||
##### Custom validator function signature
|
||||
A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// old signature
|
||||
func(i interface{}) bool
|
||||
|
||||
// new signature
|
||||
func(i interface{}, o interface{}) bool
|
||||
```
|
||||
|
||||
##### Adding a custom validator
|
||||
This was changed to prevent data races when accessing custom validators.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// before
|
||||
govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
})
|
||||
|
||||
// after
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
}))
|
||||
```
|
||||
|
||||
#### List of functions:
|
||||
```go
|
||||
func Abs(value float64) float64
|
||||
func BlackList(str, chars string) string
|
||||
func ByteLength(str string, params ...string) bool
|
||||
func CamelCaseToUnderscore(str string) string
|
||||
func Contains(str, substring string) bool
|
||||
func Count(array []interface{}, iterator ConditionIterator) int
|
||||
func Each(array []interface{}, iterator Iterator)
|
||||
func ErrorByField(e error, field string) string
|
||||
func ErrorsByField(e error) map[string]string
|
||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
|
||||
func Find(array []interface{}, iterator ConditionIterator) interface{}
|
||||
func GetLine(s string, index int) (string, error)
|
||||
func GetLines(s string) []string
|
||||
func InRange(value, left, right float64) bool
|
||||
func IsASCII(str string) bool
|
||||
func IsAlpha(str string) bool
|
||||
func IsAlphanumeric(str string) bool
|
||||
func IsBase64(str string) bool
|
||||
func IsByteLength(str string, min, max int) bool
|
||||
func IsCIDR(str string) bool
|
||||
func IsCreditCard(str string) bool
|
||||
func IsDNSName(str string) bool
|
||||
func IsDataURI(str string) bool
|
||||
func IsDialString(str string) bool
|
||||
func IsDivisibleBy(str, num string) bool
|
||||
func IsEmail(str string) bool
|
||||
func IsFilePath(str string) (bool, int)
|
||||
func IsFloat(str string) bool
|
||||
func IsFullWidth(str string) bool
|
||||
func IsHalfWidth(str string) bool
|
||||
func IsHexadecimal(str string) bool
|
||||
func IsHexcolor(str string) bool
|
||||
func IsHost(str string) bool
|
||||
func IsIP(str string) bool
|
||||
func IsIPv4(str string) bool
|
||||
func IsIPv6(str string) bool
|
||||
func IsISBN(str string, version int) bool
|
||||
func IsISBN10(str string) bool
|
||||
func IsISBN13(str string) bool
|
||||
func IsISO3166Alpha2(str string) bool
|
||||
func IsISO3166Alpha3(str string) bool
|
||||
func IsISO693Alpha2(str string) bool
|
||||
func IsISO693Alpha3b(str string) bool
|
||||
func IsISO4217(str string) bool
|
||||
func IsIn(str string, params ...string) bool
|
||||
func IsInt(str string) bool
|
||||
func IsJSON(str string) bool
|
||||
func IsLatitude(str string) bool
|
||||
func IsLongitude(str string) bool
|
||||
func IsLowerCase(str string) bool
|
||||
func IsMAC(str string) bool
|
||||
func IsMongoID(str string) bool
|
||||
func IsMultibyte(str string) bool
|
||||
func IsNatural(value float64) bool
|
||||
func IsNegative(value float64) bool
|
||||
func IsNonNegative(value float64) bool
|
||||
func IsNonPositive(value float64) bool
|
||||
func IsNull(str string) bool
|
||||
func IsNumeric(str string) bool
|
||||
func IsPort(str string) bool
|
||||
func IsPositive(value float64) bool
|
||||
func IsPrintableASCII(str string) bool
|
||||
func IsRFC3339(str string) bool
|
||||
func IsRFC3339WithoutZone(str string) bool
|
||||
func IsRGBcolor(str string) bool
|
||||
func IsRequestURI(rawurl string) bool
|
||||
func IsRequestURL(rawurl string) bool
|
||||
func IsSSN(str string) bool
|
||||
func IsSemver(str string) bool
|
||||
func IsTime(str string, format string) bool
|
||||
func IsURL(str string) bool
|
||||
func IsUTFDigit(str string) bool
|
||||
func IsUTFLetter(str string) bool
|
||||
func IsUTFLetterNumeric(str string) bool
|
||||
func IsUTFNumeric(str string) bool
|
||||
func IsUUID(str string) bool
|
||||
func IsUUIDv3(str string) bool
|
||||
func IsUUIDv4(str string) bool
|
||||
func IsUUIDv5(str string) bool
|
||||
func IsUpperCase(str string) bool
|
||||
func IsVariableWidth(str string) bool
|
||||
func IsWhole(value float64) bool
|
||||
func LeftTrim(str, chars string) string
|
||||
func Map(array []interface{}, iterator ResultIterator) []interface{}
|
||||
func Matches(str, pattern string) bool
|
||||
func NormalizeEmail(str string) (string, error)
|
||||
func PadBoth(str string, padStr string, padLen int) string
|
||||
func PadLeft(str string, padStr string, padLen int) string
|
||||
func PadRight(str string, padStr string, padLen int) string
|
||||
func Range(str string, params ...string) bool
|
||||
func RemoveTags(s string) string
|
||||
func ReplacePattern(str, pattern, replace string) string
|
||||
func Reverse(s string) string
|
||||
func RightTrim(str, chars string) string
|
||||
func RuneLength(str string, params ...string) bool
|
||||
func SafeFileName(str string) string
|
||||
func SetFieldsRequiredByDefault(value bool)
|
||||
func Sign(value float64) float64
|
||||
func StringLength(str string, params ...string) bool
|
||||
func StringMatches(s string, params ...string) bool
|
||||
func StripLow(str string, keepNewLines bool) string
|
||||
func ToBoolean(str string) (bool, error)
|
||||
func ToFloat(str string) (float64, error)
|
||||
func ToInt(str string) (int64, error)
|
||||
func ToJSON(obj interface{}) (string, error)
|
||||
func ToString(obj interface{}) string
|
||||
func Trim(str, chars string) string
|
||||
func Truncate(str string, length int, ending string) string
|
||||
func UnderscoreToCamelCase(s string) string
|
||||
func ValidateStruct(s interface{}) (bool, error)
|
||||
func WhiteList(str, chars string) string
|
||||
type ConditionIterator
|
||||
type CustomTypeValidator
|
||||
type Error
|
||||
func (e Error) Error() string
|
||||
type Errors
|
||||
func (es Errors) Error() string
|
||||
func (es Errors) Errors() []error
|
||||
type ISO3166Entry
|
||||
type Iterator
|
||||
type ParamValidator
|
||||
type ResultIterator
|
||||
type UnsupportedTypeError
|
||||
func (e *UnsupportedTypeError) Error() string
|
||||
type Validator
|
||||
```
|
||||
|
||||
#### Examples
|
||||
###### IsURL
|
||||
```go
|
||||
println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
|
||||
```
|
||||
###### ToString
|
||||
```go
|
||||
type User struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
str := govalidator.ToString(&User{"John", "Juan"})
|
||||
println(str)
|
||||
```
|
||||
###### Each, Map, Filter, Count for slices
|
||||
Each iterates over the slice/array and calls Iterator for every item
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.Iterator = func(value interface{}, index int) {
|
||||
println(value.(int))
|
||||
}
|
||||
govalidator.Each(data, fn)
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
|
||||
return value.(int) * 3
|
||||
}
|
||||
_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
|
||||
return value.(int)%2 == 0
|
||||
}
|
||||
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
|
||||
_ = govalidator.Count(data, fn) // result = 5
|
||||
```
|
||||
###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
|
||||
If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
|
||||
```go
|
||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||
return str == "duck"
|
||||
})
|
||||
```
|
||||
For completely custom validators (interface-based), see below.
|
||||
|
||||
Here is a list of available validators for struct fields (validator - used function):
|
||||
```go
|
||||
"email": IsEmail,
|
||||
"url": IsURL,
|
||||
"dialstring": IsDialString,
|
||||
"requrl": IsRequestURL,
|
||||
"requri": IsRequestURI,
|
||||
"alpha": IsAlpha,
|
||||
"utfletter": IsUTFLetter,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"numeric": IsNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"lowercase": IsLowerCase,
|
||||
"uppercase": IsUpperCase,
|
||||
"int": IsInt,
|
||||
"float": IsFloat,
|
||||
"null": IsNull,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"creditcard": IsCreditCard,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"multibyte": IsMultibyte,
|
||||
"ascii": IsASCII,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"variablewidth": IsVariableWidth,
|
||||
"base64": IsBase64,
|
||||
"datauri": IsDataURI,
|
||||
"ip": IsIP,
|
||||
"port": IsPort,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"dns": IsDNSName,
|
||||
"host": IsHost,
|
||||
"mac": IsMAC,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
"rfc3339": IsRFC3339,
|
||||
"rfc3339WithoutZone": IsRFC3339WithoutZone,
|
||||
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||
```
|
||||
Validators with parameters
|
||||
|
||||
```go
|
||||
"range(min|max)": Range,
|
||||
"length(min|max)": ByteLength,
|
||||
"runelength(min|max)": RuneLength,
|
||||
"stringlength(min|max)": StringLength,
|
||||
"matches(pattern)": StringMatches,
|
||||
"in(string1|string2|...|stringN)": IsIn,
|
||||
"rsapub(keylength)" : IsRsaPub,
|
||||
```
|
||||
|
||||
And here is small example of usage:
|
||||
```go
|
||||
type Post struct {
|
||||
Title string `valid:"alphanum,required"`
|
||||
Message string `valid:"duck,ascii"`
|
||||
Message2 string `valid:"animal(dog)"`
|
||||
AuthorIP string `valid:"ipv4"`
|
||||
Date string `valid:"-"`
|
||||
}
|
||||
post := &Post{
|
||||
Title: "My Example Post",
|
||||
Message: "duck",
|
||||
Message2: "dog",
|
||||
AuthorIP: "123.234.54.3",
|
||||
}
|
||||
|
||||
// Add your own struct validation tags
|
||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||
return str == "duck"
|
||||
})
|
||||
|
||||
// Add your own struct validation tags with parameter
|
||||
govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
|
||||
species := params[0]
|
||||
return str == species
|
||||
})
|
||||
govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
|
||||
|
||||
result, err := govalidator.ValidateStruct(post)
|
||||
if err != nil {
|
||||
println("error: " + err.Error())
|
||||
}
|
||||
println(result)
|
||||
```
|
||||
###### WhiteList
|
||||
```go
|
||||
// Remove all characters from string ignoring characters between "a" and "z"
|
||||
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
|
||||
```
|
||||
|
||||
###### Custom validation functions
|
||||
Custom validation using your own domain specific validators is also available - here's an example of how to use it:
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
type CustomByteArray [6]byte // custom types are supported and can be validated
|
||||
|
||||
type StructWithCustomByteArray struct {
|
||||
ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
|
||||
Email string `valid:"email"`
|
||||
CustomMinLength int `valid:"-"`
|
||||
}
|
||||
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||
switch v := context.(type) { // you can type switch on the context interface being validated
|
||||
case StructWithCustomByteArray:
|
||||
// you can check and validate against some other field in the context,
|
||||
// return early or not validate against the context at all – your choice
|
||||
case SomeOtherType:
|
||||
// ...
|
||||
default:
|
||||
// expecting some other type? Throw/panic here or continue
|
||||
}
|
||||
|
||||
switch v := i.(type) { // type switch on the struct field being validated
|
||||
case CustomByteArray:
|
||||
for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
|
||||
if e != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}))
|
||||
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||
switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
|
||||
case StructWithCustomByteArray:
|
||||
return len(v.ID) >= v.CustomMinLength
|
||||
}
|
||||
return false
|
||||
}))
|
||||
```
|
||||
|
||||
###### Custom error messages
|
||||
Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
|
||||
```go
|
||||
type Ticket struct {
|
||||
Id int64 `json:"id"`
|
||||
FirstName string `json:"firstname" valid:"required~First name is blank"`
|
||||
}
|
||||
```
|
||||
|
||||
#### Notes
|
||||
Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
|
||||
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
|
||||
|
||||
#### Support
|
||||
If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
|
||||
|
||||
#### What to contribute
|
||||
If you don't know what to do, there are some features and functions that need to be done
|
||||
|
||||
- [ ] Refactor code
|
||||
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
|
||||
- [ ] Create actual list of contributors and projects that currently using this package
|
||||
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
|
||||
- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
|
||||
- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
|
||||
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
|
||||
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
|
||||
- [ ] Implement fuzzing testing
|
||||
- [ ] Implement some struct/map/array utilities
|
||||
- [ ] Implement map/array validation
|
||||
- [ ] Implement benchmarking
|
||||
- [ ] Implement batch of examples
|
||||
- [ ] Look at forks for new features and fixes
|
||||
|
||||
#### Advice
|
||||
Feel free to create what you want, but keep in mind when you implement new features:
|
||||
- Code must be clear and readable, names of variables/constants clearly describes what they are doing
|
||||
- Public functions must be documented and described in source file and added to README.md to the list of available functions
|
||||
- There are must be unit-tests for any new functions and improvements
|
||||
|
||||
## Credits
|
||||
### Contributors
|
||||
|
||||
This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
|
||||
|
||||
#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
|
||||
* [Daniel Lohse](https://github.com/annismckenzie)
|
||||
* [Attila Oláh](https://github.com/attilaolah)
|
||||
* [Daniel Korner](https://github.com/Dadie)
|
||||
* [Steven Wilkin](https://github.com/stevenwilkin)
|
||||
* [Deiwin Sarjas](https://github.com/deiwin)
|
||||
* [Noah Shibley](https://github.com/slugmobile)
|
||||
* [Nathan Davies](https://github.com/nathj07)
|
||||
* [Matt Sanford](https://github.com/mzsanford)
|
||||
* [Simon ccl1115](https://github.com/ccl1115)
|
||||
|
||||
<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>
|
||||
|
||||
|
||||
### Backers
|
||||
|
||||
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
|
||||
|
||||
<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>
|
||||
|
||||
|
||||
### Sponsors
|
||||
|
||||
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
|
||||
|
||||
<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
|
||||
|
||||
|
||||
|
||||
|
||||
## License
|
||||
[](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
|
||||
58
vendor/github.com/asaskevich/govalidator/arrays.go
generated
vendored
58
vendor/github.com/asaskevich/govalidator/arrays.go
generated
vendored
@@ -1,58 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
// Iterator is the function that accepts element of slice/array and its index
|
||||
type Iterator func(interface{}, int)
|
||||
|
||||
// ResultIterator is the function that accepts element of slice/array and its index and returns any result
|
||||
type ResultIterator func(interface{}, int) interface{}
|
||||
|
||||
// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
|
||||
type ConditionIterator func(interface{}, int) bool
|
||||
|
||||
// Each iterates over the slice and apply Iterator to every item
|
||||
func Each(array []interface{}, iterator Iterator) {
|
||||
for index, data := range array {
|
||||
iterator(data, index)
|
||||
}
|
||||
}
|
||||
|
||||
// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result.
|
||||
func Map(array []interface{}, iterator ResultIterator) []interface{} {
|
||||
var result = make([]interface{}, len(array))
|
||||
for index, data := range array {
|
||||
result[index] = iterator(data, index)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise.
|
||||
func Find(array []interface{}, iterator ConditionIterator) interface{} {
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
return data
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice.
|
||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
|
||||
var result = make([]interface{}, 0)
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
result = append(result, data)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator.
|
||||
func Count(array []interface{}, iterator ConditionIterator) int {
|
||||
count := 0
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
count = count + 1
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
64
vendor/github.com/asaskevich/govalidator/converter.go
generated
vendored
64
vendor/github.com/asaskevich/govalidator/converter.go
generated
vendored
@@ -1,64 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ToString convert the input to a string.
|
||||
func ToString(obj interface{}) string {
|
||||
res := fmt.Sprintf("%v", obj)
|
||||
return string(res)
|
||||
}
|
||||
|
||||
// ToJSON convert the input to a valid JSON string
|
||||
func ToJSON(obj interface{}) (string, error) {
|
||||
res, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
res = []byte("")
|
||||
}
|
||||
return string(res), err
|
||||
}
|
||||
|
||||
// ToFloat convert the input string to a float, or 0.0 if the input is not a float.
|
||||
func ToFloat(str string) (float64, error) {
|
||||
res, err := strconv.ParseFloat(str, 64)
|
||||
if err != nil {
|
||||
res = 0.0
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
|
||||
func ToInt(value interface{}) (res int64, err error) {
|
||||
val := reflect.ValueOf(value)
|
||||
|
||||
switch value.(type) {
|
||||
case int, int8, int16, int32, int64:
|
||||
res = val.Int()
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
res = int64(val.Uint())
|
||||
case string:
|
||||
if IsInt(val.String()) {
|
||||
res, err = strconv.ParseInt(val.String(), 0, 64)
|
||||
if err != nil {
|
||||
res = 0
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("math: square root of negative number %g", value)
|
||||
res = 0
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("math: square root of negative number %g", value)
|
||||
res = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ToBoolean convert the input string to a boolean.
|
||||
func ToBoolean(str string) (bool, error) {
|
||||
return strconv.ParseBool(str)
|
||||
}
|
||||
43
vendor/github.com/asaskevich/govalidator/error.go
generated
vendored
43
vendor/github.com/asaskevich/govalidator/error.go
generated
vendored
@@ -1,43 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import "strings"
|
||||
|
||||
// Errors is an array of multiple errors and conforms to the error interface.
|
||||
type Errors []error
|
||||
|
||||
// Errors returns itself.
|
||||
func (es Errors) Errors() []error {
|
||||
return es
|
||||
}
|
||||
|
||||
func (es Errors) Error() string {
|
||||
var errs []string
|
||||
for _, e := range es {
|
||||
errs = append(errs, e.Error())
|
||||
}
|
||||
return strings.Join(errs, ";")
|
||||
}
|
||||
|
||||
// Error encapsulates a name, an error and whether there's a custom error message or not.
|
||||
type Error struct {
|
||||
Name string
|
||||
Err error
|
||||
CustomErrorMessageExists bool
|
||||
|
||||
// Validator indicates the name of the validator that failed
|
||||
Validator string
|
||||
Path []string
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
if e.CustomErrorMessageExists {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
errName := e.Name
|
||||
if len(e.Path) > 0 {
|
||||
errName = strings.Join(append(e.Path, e.Name), ".")
|
||||
}
|
||||
|
||||
return errName + ": " + e.Err.Error()
|
||||
}
|
||||
97
vendor/github.com/asaskevich/govalidator/numerics.go
generated
vendored
97
vendor/github.com/asaskevich/govalidator/numerics.go
generated
vendored
@@ -1,97 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Abs returns absolute value of number
|
||||
func Abs(value float64) float64 {
|
||||
return math.Abs(value)
|
||||
}
|
||||
|
||||
// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
|
||||
func Sign(value float64) float64 {
|
||||
if value > 0 {
|
||||
return 1
|
||||
} else if value < 0 {
|
||||
return -1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// IsNegative returns true if value < 0
|
||||
func IsNegative(value float64) bool {
|
||||
return value < 0
|
||||
}
|
||||
|
||||
// IsPositive returns true if value > 0
|
||||
func IsPositive(value float64) bool {
|
||||
return value > 0
|
||||
}
|
||||
|
||||
// IsNonNegative returns true if value >= 0
|
||||
func IsNonNegative(value float64) bool {
|
||||
return value >= 0
|
||||
}
|
||||
|
||||
// IsNonPositive returns true if value <= 0
|
||||
func IsNonPositive(value float64) bool {
|
||||
return value <= 0
|
||||
}
|
||||
|
||||
// InRange returns true if value lies between left and right border
|
||||
func InRangeInt(value, left, right interface{}) bool {
|
||||
value64, _ := ToInt(value)
|
||||
left64, _ := ToInt(left)
|
||||
right64, _ := ToInt(right)
|
||||
if left64 > right64 {
|
||||
left64, right64 = right64, left64
|
||||
}
|
||||
return value64 >= left64 && value64 <= right64
|
||||
}
|
||||
|
||||
// InRange returns true if value lies between left and right border
|
||||
func InRangeFloat32(value, left, right float32) bool {
|
||||
if left > right {
|
||||
left, right = right, left
|
||||
}
|
||||
return value >= left && value <= right
|
||||
}
|
||||
|
||||
// InRange returns true if value lies between left and right border
|
||||
func InRangeFloat64(value, left, right float64) bool {
|
||||
if left > right {
|
||||
left, right = right, left
|
||||
}
|
||||
return value >= left && value <= right
|
||||
}
|
||||
|
||||
// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type
|
||||
func InRange(value interface{}, left interface{}, right interface{}) bool {
|
||||
|
||||
reflectValue := reflect.TypeOf(value).Kind()
|
||||
reflectLeft := reflect.TypeOf(left).Kind()
|
||||
reflectRight := reflect.TypeOf(right).Kind()
|
||||
|
||||
if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
|
||||
return InRangeInt(value.(int), left.(int), right.(int))
|
||||
} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
|
||||
return InRangeFloat32(value.(float32), left.(float32), right.(float32))
|
||||
} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
|
||||
return InRangeFloat64(value.(float64), left.(float64), right.(float64))
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// IsWhole returns true if value is whole number
|
||||
func IsWhole(value float64) bool {
|
||||
return math.Remainder(value, 1) == 0
|
||||
}
|
||||
|
||||
// IsNatural returns true if value is natural number (positive and whole)
|
||||
func IsNatural(value float64) bool {
|
||||
return IsWhole(value) && IsPositive(value)
|
||||
}
|
||||
101
vendor/github.com/asaskevich/govalidator/patterns.go
generated
vendored
101
vendor/github.com/asaskevich/govalidator/patterns.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import "regexp"
|
||||
|
||||
// Basic regular expressions for validating strings
|
||||
const (
|
||||
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
|
||||
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
|
||||
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
|
||||
ISBN13 string = "^(?:[0-9]{13})$"
|
||||
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
Alpha string = "^[a-zA-Z]+$"
|
||||
Alphanumeric string = "^[a-zA-Z0-9]+$"
|
||||
Numeric string = "^[0-9]+$"
|
||||
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
|
||||
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
|
||||
Hexadecimal string = "^[0-9a-fA-F]+$"
|
||||
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
|
||||
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
|
||||
ASCII string = "^[\x00-\x7F]+$"
|
||||
Multibyte string = "[^\x00-\x7F]"
|
||||
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
|
||||
PrintableASCII string = "^[\x20-\x7E]+$"
|
||||
DataURI string = "^data:.+\\/(.+);base64$"
|
||||
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
||||
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
||||
DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
|
||||
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
|
||||
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
|
||||
URLUsername string = `(\S+(:\S*)?@)`
|
||||
URLPath string = `((\/|\?|#)[^\s]*)`
|
||||
URLPort string = `(:(\d{1,5}))`
|
||||
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
|
||||
URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
|
||||
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
|
||||
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
||||
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
||||
UnixPath string = `^(/[^/\x00]*)+/?$`
|
||||
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
|
||||
tagName string = "valid"
|
||||
hasLowerCase string = ".*[[:lower:]]"
|
||||
hasUpperCase string = ".*[[:upper:]]"
|
||||
hasWhitespace string = ".*[[:space:]]"
|
||||
hasWhitespaceOnly string = "^[[:space:]]+$"
|
||||
)
|
||||
|
||||
// Used by IsFilePath func
|
||||
const (
|
||||
// Unknown is unresolved OS type
|
||||
Unknown = iota
|
||||
// Win is Windows type
|
||||
Win
|
||||
// Unix is *nix OS types
|
||||
Unix
|
||||
)
|
||||
|
||||
var (
|
||||
userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
|
||||
hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
|
||||
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
|
||||
rxEmail = regexp.MustCompile(Email)
|
||||
rxCreditCard = regexp.MustCompile(CreditCard)
|
||||
rxISBN10 = regexp.MustCompile(ISBN10)
|
||||
rxISBN13 = regexp.MustCompile(ISBN13)
|
||||
rxUUID3 = regexp.MustCompile(UUID3)
|
||||
rxUUID4 = regexp.MustCompile(UUID4)
|
||||
rxUUID5 = regexp.MustCompile(UUID5)
|
||||
rxUUID = regexp.MustCompile(UUID)
|
||||
rxAlpha = regexp.MustCompile(Alpha)
|
||||
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
|
||||
rxNumeric = regexp.MustCompile(Numeric)
|
||||
rxInt = regexp.MustCompile(Int)
|
||||
rxFloat = regexp.MustCompile(Float)
|
||||
rxHexadecimal = regexp.MustCompile(Hexadecimal)
|
||||
rxHexcolor = regexp.MustCompile(Hexcolor)
|
||||
rxRGBcolor = regexp.MustCompile(RGBcolor)
|
||||
rxASCII = regexp.MustCompile(ASCII)
|
||||
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
|
||||
rxMultibyte = regexp.MustCompile(Multibyte)
|
||||
rxFullWidth = regexp.MustCompile(FullWidth)
|
||||
rxHalfWidth = regexp.MustCompile(HalfWidth)
|
||||
rxBase64 = regexp.MustCompile(Base64)
|
||||
rxDataURI = regexp.MustCompile(DataURI)
|
||||
rxLatitude = regexp.MustCompile(Latitude)
|
||||
rxLongitude = regexp.MustCompile(Longitude)
|
||||
rxDNSName = regexp.MustCompile(DNSName)
|
||||
rxURL = regexp.MustCompile(URL)
|
||||
rxSSN = regexp.MustCompile(SSN)
|
||||
rxWinPath = regexp.MustCompile(WinPath)
|
||||
rxUnixPath = regexp.MustCompile(UnixPath)
|
||||
rxSemver = regexp.MustCompile(Semver)
|
||||
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
|
||||
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
|
||||
rxHasWhitespace = regexp.MustCompile(hasWhitespace)
|
||||
rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
|
||||
)
|
||||
636
vendor/github.com/asaskevich/govalidator/types.go
generated
vendored
636
vendor/github.com/asaskevich/govalidator/types.go
generated
vendored
@@ -1,636 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Validator is a wrapper for a validator function that returns bool and accepts string.
|
||||
type Validator func(str string) bool
|
||||
|
||||
// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
|
||||
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
|
||||
type CustomTypeValidator func(i interface{}, o interface{}) bool
|
||||
|
||||
// ParamValidator is a wrapper for validator functions that accepts additional parameters.
|
||||
type ParamValidator func(str string, params ...string) bool
|
||||
type tagOptionsMap map[string]tagOption
|
||||
|
||||
func (t tagOptionsMap) orderedKeys() []string {
|
||||
var keys []string
|
||||
for k := range t {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Slice(keys, func(a, b int) bool {
|
||||
return t[keys[a]].order < t[keys[b]].order
|
||||
})
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
type tagOption struct {
|
||||
name string
|
||||
customErrorMessage string
|
||||
order int
|
||||
}
|
||||
|
||||
// UnsupportedTypeError is a wrapper for reflect.Type
|
||||
type UnsupportedTypeError struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
||||
// It implements the methods to sort by string.
|
||||
type stringValues []reflect.Value
|
||||
|
||||
// ParamTagMap is a map of functions accept variants parameters
|
||||
var ParamTagMap = map[string]ParamValidator{
|
||||
"length": ByteLength,
|
||||
"range": Range,
|
||||
"runelength": RuneLength,
|
||||
"stringlength": StringLength,
|
||||
"matches": StringMatches,
|
||||
"in": isInRaw,
|
||||
"rsapub": IsRsaPub,
|
||||
}
|
||||
|
||||
// ParamTagRegexMap maps param tags to their respective regexes.
|
||||
var ParamTagRegexMap = map[string]*regexp.Regexp{
|
||||
"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"in": regexp.MustCompile(`^in\((.*)\)`),
|
||||
"matches": regexp.MustCompile(`^matches\((.+)\)$`),
|
||||
"rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
|
||||
}
|
||||
|
||||
type customTypeTagMap struct {
|
||||
validators map[string]CustomTypeValidator
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
|
||||
tm.RLock()
|
||||
defer tm.RUnlock()
|
||||
v, ok := tm.validators[name]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
|
||||
tm.Lock()
|
||||
defer tm.Unlock()
|
||||
tm.validators[name] = ctv
|
||||
}
|
||||
|
||||
// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
|
||||
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
|
||||
// `type UUID [16]byte` (this would be handled as an array of bytes).
|
||||
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
|
||||
|
||||
// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
|
||||
var TagMap = map[string]Validator{
|
||||
"email": IsEmail,
|
||||
"url": IsURL,
|
||||
"dialstring": IsDialString,
|
||||
"requrl": IsRequestURL,
|
||||
"requri": IsRequestURI,
|
||||
"alpha": IsAlpha,
|
||||
"utfletter": IsUTFLetter,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"numeric": IsNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"lowercase": IsLowerCase,
|
||||
"uppercase": IsUpperCase,
|
||||
"int": IsInt,
|
||||
"float": IsFloat,
|
||||
"null": IsNull,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"creditcard": IsCreditCard,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"multibyte": IsMultibyte,
|
||||
"ascii": IsASCII,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"variablewidth": IsVariableWidth,
|
||||
"base64": IsBase64,
|
||||
"datauri": IsDataURI,
|
||||
"ip": IsIP,
|
||||
"port": IsPort,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"dns": IsDNSName,
|
||||
"host": IsHost,
|
||||
"mac": IsMAC,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
"rfc3339": IsRFC3339,
|
||||
"rfc3339WithoutZone": IsRFC3339WithoutZone,
|
||||
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||
"ISO4217": IsISO4217,
|
||||
}
|
||||
|
||||
// ISO3166Entry stores country codes
|
||||
type ISO3166Entry struct {
|
||||
EnglishShortName string
|
||||
FrenchShortName string
|
||||
Alpha2Code string
|
||||
Alpha3Code string
|
||||
Numeric string
|
||||
}
|
||||
|
||||
//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
|
||||
var ISO3166List = []ISO3166Entry{
|
||||
{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
|
||||
{"Albania", "Albanie (l')", "AL", "ALB", "008"},
|
||||
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
|
||||
{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
|
||||
{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
|
||||
{"Andorra", "Andorre (l')", "AD", "AND", "020"},
|
||||
{"Angola", "Angola (l')", "AO", "AGO", "024"},
|
||||
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
|
||||
{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
|
||||
{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
|
||||
{"Australia", "Australie (l')", "AU", "AUS", "036"},
|
||||
{"Austria", "Autriche (l')", "AT", "AUT", "040"},
|
||||
{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
|
||||
{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
|
||||
{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
|
||||
{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
|
||||
{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
|
||||
{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
|
||||
{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
|
||||
{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
|
||||
{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
|
||||
{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
|
||||
{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
|
||||
{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
|
||||
{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
|
||||
{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
|
||||
{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
|
||||
{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
|
||||
{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
|
||||
{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
|
||||
{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
|
||||
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
|
||||
{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
|
||||
{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
|
||||
{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
|
||||
{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
|
||||
{"Canada", "Canada (le)", "CA", "CAN", "124"},
|
||||
{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
|
||||
{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
|
||||
{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
|
||||
{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
|
||||
{"Chad", "Tchad (le)", "TD", "TCD", "148"},
|
||||
{"Chile", "Chili (le)", "CL", "CHL", "152"},
|
||||
{"China", "Chine (la)", "CN", "CHN", "156"},
|
||||
{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
|
||||
{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
|
||||
{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
|
||||
{"Colombia", "Colombie (la)", "CO", "COL", "170"},
|
||||
{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
|
||||
{"Mayotte", "Mayotte", "YT", "MYT", "175"},
|
||||
{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
|
||||
{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
|
||||
{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
|
||||
{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
|
||||
{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
|
||||
{"Cuba", "Cuba", "CU", "CUB", "192"},
|
||||
{"Cyprus", "Chypre", "CY", "CYP", "196"},
|
||||
{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
|
||||
{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
|
||||
{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
|
||||
{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
|
||||
{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
|
||||
{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
|
||||
{"El Salvador", "El Salvador", "SV", "SLV", "222"},
|
||||
{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
|
||||
{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
|
||||
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
|
||||
{"Estonia", "Estonie (l')", "EE", "EST", "233"},
|
||||
{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
|
||||
{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
|
||||
{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
|
||||
{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
|
||||
{"Finland", "Finlande (la)", "FI", "FIN", "246"},
|
||||
{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
|
||||
{"France", "France (la)", "FR", "FRA", "250"},
|
||||
{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
|
||||
{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
|
||||
{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
|
||||
{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
|
||||
{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
|
||||
{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
|
||||
{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
|
||||
{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
|
||||
{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
|
||||
{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
|
||||
{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
|
||||
{"Kiribati", "Kiribati", "KI", "KIR", "296"},
|
||||
{"Greece", "Grèce (la)", "GR", "GRC", "300"},
|
||||
{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
|
||||
{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
|
||||
{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
|
||||
{"Guam", "Guam", "GU", "GUM", "316"},
|
||||
{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
|
||||
{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
|
||||
{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
|
||||
{"Haiti", "Haïti", "HT", "HTI", "332"},
|
||||
{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
|
||||
{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
|
||||
{"Honduras", "Honduras (le)", "HN", "HND", "340"},
|
||||
{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
|
||||
{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
|
||||
{"Iceland", "Islande (l')", "IS", "ISL", "352"},
|
||||
{"India", "Inde (l')", "IN", "IND", "356"},
|
||||
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
|
||||
{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
|
||||
{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
|
||||
{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
|
||||
{"Israel", "Israël", "IL", "ISR", "376"},
|
||||
{"Italy", "Italie (l')", "IT", "ITA", "380"},
|
||||
{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
|
||||
{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
|
||||
{"Japan", "Japon (le)", "JP", "JPN", "392"},
|
||||
{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
|
||||
{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
|
||||
{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
|
||||
{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
|
||||
{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
|
||||
{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
|
||||
{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
|
||||
{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
|
||||
{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
|
||||
{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
|
||||
{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
|
||||
{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
|
||||
{"Libya", "Libye (la)", "LY", "LBY", "434"},
|
||||
{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
|
||||
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
|
||||
{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
|
||||
{"Macao", "Macao", "MO", "MAC", "446"},
|
||||
{"Madagascar", "Madagascar", "MG", "MDG", "450"},
|
||||
{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
|
||||
{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
|
||||
{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
|
||||
{"Mali", "Mali (le)", "ML", "MLI", "466"},
|
||||
{"Malta", "Malte", "MT", "MLT", "470"},
|
||||
{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
|
||||
{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
|
||||
{"Mauritius", "Maurice", "MU", "MUS", "480"},
|
||||
{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
|
||||
{"Monaco", "Monaco", "MC", "MCO", "492"},
|
||||
{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
|
||||
{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
|
||||
{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
|
||||
{"Montserrat", "Montserrat", "MS", "MSR", "500"},
|
||||
{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
|
||||
{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
|
||||
{"Oman", "Oman", "OM", "OMN", "512"},
|
||||
{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
|
||||
{"Nauru", "Nauru", "NR", "NRU", "520"},
|
||||
{"Nepal", "Népal (le)", "NP", "NPL", "524"},
|
||||
{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
|
||||
{"Curaçao", "Curaçao", "CW", "CUW", "531"},
|
||||
{"Aruba", "Aruba", "AW", "ABW", "533"},
|
||||
{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
|
||||
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
|
||||
{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
|
||||
{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
|
||||
{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
|
||||
{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
|
||||
{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
|
||||
{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
|
||||
{"Niue", "Niue", "NU", "NIU", "570"},
|
||||
{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
|
||||
{"Norway", "Norvège (la)", "NO", "NOR", "578"},
|
||||
{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
|
||||
{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
|
||||
{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
|
||||
{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
|
||||
{"Palau", "Palaos (les)", "PW", "PLW", "585"},
|
||||
{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
|
||||
{"Panama", "Panama (le)", "PA", "PAN", "591"},
|
||||
{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
|
||||
{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
|
||||
{"Peru", "Pérou (le)", "PE", "PER", "604"},
|
||||
{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
|
||||
{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
|
||||
{"Poland", "Pologne (la)", "PL", "POL", "616"},
|
||||
{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
|
||||
{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
|
||||
{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
|
||||
{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
|
||||
{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
|
||||
{"Réunion", "Réunion (La)", "RE", "REU", "638"},
|
||||
{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
|
||||
{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
|
||||
{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
|
||||
{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
|
||||
{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
|
||||
{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
|
||||
{"Anguilla", "Anguilla", "AI", "AIA", "660"},
|
||||
{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
|
||||
{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
|
||||
{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
|
||||
{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
|
||||
{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
|
||||
{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
|
||||
{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
|
||||
{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
|
||||
{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
|
||||
{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
|
||||
{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
|
||||
{"Singapore", "Singapour", "SG", "SGP", "702"},
|
||||
{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
|
||||
{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
|
||||
{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
|
||||
{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
|
||||
{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
|
||||
{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
|
||||
{"Spain", "Espagne (l')", "ES", "ESP", "724"},
|
||||
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
|
||||
{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
|
||||
{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
|
||||
{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
|
||||
{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
|
||||
{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
|
||||
{"Sweden", "Suède (la)", "SE", "SWE", "752"},
|
||||
{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
|
||||
{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
|
||||
{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
|
||||
{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
|
||||
{"Togo", "Togo (le)", "TG", "TGO", "768"},
|
||||
{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
|
||||
{"Tonga", "Tonga (les)", "TO", "TON", "776"},
|
||||
{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
|
||||
{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
|
||||
{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
|
||||
{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
|
||||
{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
|
||||
{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
|
||||
{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
|
||||
{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
|
||||
{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
|
||||
{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
|
||||
{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
|
||||
{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
|
||||
{"Guernsey", "Guernesey", "GG", "GGY", "831"},
|
||||
{"Jersey", "Jersey", "JE", "JEY", "832"},
|
||||
{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
|
||||
{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
|
||||
{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
|
||||
{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
|
||||
{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
|
||||
{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
|
||||
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
|
||||
{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
|
||||
{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
|
||||
{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
|
||||
{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
|
||||
{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
|
||||
}
|
||||
|
||||
// ISO4217List is the list of ISO currency codes
|
||||
var ISO4217List = []string{
|
||||
"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
|
||||
"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
|
||||
"CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
|
||||
"DJF", "DKK", "DOP", "DZD",
|
||||
"EGP", "ERN", "ETB", "EUR",
|
||||
"FJD", "FKP",
|
||||
"GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
|
||||
"HKD", "HNL", "HRK", "HTG", "HUF",
|
||||
"IDR", "ILS", "INR", "IQD", "IRR", "ISK",
|
||||
"JMD", "JOD", "JPY",
|
||||
"KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
|
||||
"LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
|
||||
"MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
|
||||
"NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
|
||||
"OMR",
|
||||
"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
|
||||
"QAR",
|
||||
"RON", "RSD", "RUB", "RWF",
|
||||
"SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
|
||||
"THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
|
||||
"UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
|
||||
"VEF", "VND", "VUV",
|
||||
"WST",
|
||||
"XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
|
||||
"YER",
|
||||
"ZAR", "ZMW", "ZWL",
|
||||
}
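As a quick illustration of how the table above can be consumed, here is a minimal sketch (not part of the vendored file) that checks whether a string appears in ISO4217List; the helper name is purely hypothetical and the library also provides its own currency validators.

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// isCurrencyCode reports whether code appears in govalidator.ISO4217List.
// (Hypothetical helper for illustration only.)
func isCurrencyCode(code string) bool {
	for _, c := range govalidator.ISO4217List {
		if c == code {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCurrencyCode("EUR")) // true
	fmt.Println(isCurrencyCode("EUX")) // false
}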
|
||||
|
||||
// ISO693Entry stores ISO language codes
|
||||
type ISO693Entry struct {
|
||||
Alpha3bCode string
|
||||
Alpha2Code string
|
||||
English string
|
||||
}
|
||||
|
||||
// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
|
||||
var ISO693List = []ISO693Entry{
|
||||
{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
|
||||
{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
|
||||
{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
|
||||
{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
|
||||
{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
|
||||
{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
|
||||
{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
|
||||
{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
|
||||
{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
|
||||
{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
|
||||
{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
|
||||
{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
|
||||
{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
|
||||
{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
|
||||
{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
|
||||
{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
|
||||
{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
|
||||
{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
|
||||
{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
|
||||
{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
|
||||
{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
|
||||
{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
|
||||
{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
|
||||
{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
|
||||
{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
|
||||
{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
|
||||
{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
|
||||
{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
|
||||
{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
|
||||
{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
|
||||
{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
|
||||
{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
|
||||
{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
|
||||
{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
|
||||
{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
|
||||
{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
|
||||
{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
|
||||
{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
|
||||
{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
|
||||
{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
|
||||
{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
|
||||
{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
|
||||
{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
|
||||
{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
|
||||
{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
|
||||
{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
|
||||
{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
|
||||
{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
|
||||
{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
|
||||
{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
|
||||
{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
|
||||
{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
|
||||
{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
|
||||
{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
|
||||
{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
|
||||
{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
|
||||
{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
|
||||
{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
|
||||
{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
|
||||
{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
|
||||
{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
|
||||
{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
|
||||
{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
|
||||
{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
|
||||
{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
|
||||
{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
|
||||
{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
|
||||
{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
|
||||
{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
|
||||
{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
|
||||
{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
|
||||
{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
|
||||
{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
|
||||
{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
|
||||
{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
|
||||
{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
|
||||
{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
|
||||
{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
|
||||
{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
|
||||
{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
|
||||
{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
|
||||
{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
|
||||
{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
|
||||
{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
|
||||
{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
|
||||
{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
|
||||
{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
|
||||
{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
|
||||
{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
|
||||
{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
|
||||
{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
|
||||
{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
|
||||
{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
|
||||
{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
|
||||
{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
|
||||
{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
|
||||
{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
|
||||
{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
|
||||
{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
|
||||
{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
|
||||
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
|
||||
{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
|
||||
{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
|
||||
{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
|
||||
{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
|
||||
{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
|
||||
{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
|
||||
{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
|
||||
{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
|
||||
{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
|
||||
{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
|
||||
{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
|
||||
{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
|
||||
{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
|
||||
{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
|
||||
{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
|
||||
{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
|
||||
{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
|
||||
{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
|
||||
{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
|
||||
{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
|
||||
{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
|
||||
{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
|
||||
{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
|
||||
{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
|
||||
{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
|
||||
{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
|
||||
{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
|
||||
{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
|
||||
{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
|
||||
{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
|
||||
{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
|
||||
{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
|
||||
{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
|
||||
{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
|
||||
{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
|
||||
{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
|
||||
{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
|
||||
{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
|
||||
{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
|
||||
{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
|
||||
{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
|
||||
{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
|
||||
{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
|
||||
{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
|
||||
{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
|
||||
{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
|
||||
{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
|
||||
{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
|
||||
{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
|
||||
{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
|
||||
{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
|
||||
{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
|
||||
{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
|
||||
{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
|
||||
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
|
||||
{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
|
||||
{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
|
||||
{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
|
||||
{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
|
||||
{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
|
||||
{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
|
||||
{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
|
||||
{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
|
||||
{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
|
||||
{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
|
||||
{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
|
||||
{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
|
||||
{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
|
||||
{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
|
||||
{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
|
||||
{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
|
||||
{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
|
||||
{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
|
||||
{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
|
||||
{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
|
||||
{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
|
||||
{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
|
||||
{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
|
||||
{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
|
||||
{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
|
||||
{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
|
||||
{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
|
||||
{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
|
||||
}
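The language table lends itself to lookups by alpha-2 code. Below is a minimal sketch (not part of the vendored file) that builds a map from Alpha2Code to the English name using the exported ISO693List.

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Build a lookup from ISO 639-1 (alpha-2) codes to English language names.
	byAlpha2 := make(map[string]string, len(govalidator.ISO693List))
	for _, e := range govalidator.ISO693List {
		byAlpha2[e.Alpha2Code] = e.English
	}
	fmt.Println(byAlpha2["fr"]) // French
	fmt.Println(byAlpha2["sw"]) // Swahili
}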
|
||||
270
vendor/github.com/asaskevich/govalidator/utils.go
generated
vendored
@@ -1,270 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"math"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Contains checks whether the string contains the substring.
|
||||
func Contains(str, substring string) bool {
|
||||
return strings.Contains(str, substring)
|
||||
}
|
||||
|
||||
// Matches checks whether the string matches the pattern (a regular expression).
// It returns false if the pattern fails to compile or does not match.
|
||||
func Matches(str, pattern string) bool {
|
||||
match, _ := regexp.MatchString(pattern, str)
|
||||
return match
|
||||
}
|
||||
|
||||
// LeftTrim trims characters from the left side of the input.
// If the second argument is empty, leading spaces are removed.
|
||||
func LeftTrim(str, chars string) string {
|
||||
if chars == "" {
|
||||
return strings.TrimLeftFunc(str, unicode.IsSpace)
|
||||
}
|
||||
r, _ := regexp.Compile("^[" + chars + "]+")
|
||||
return r.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// RightTrim trims characters from the right side of the input.
// If the second argument is empty, trailing spaces are removed.
|
||||
func RightTrim(str, chars string) string {
|
||||
if chars == "" {
|
||||
return strings.TrimRightFunc(str, unicode.IsSpace)
|
||||
}
|
||||
r, _ := regexp.Compile("[" + chars + "]+$")
|
||||
return r.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// Trim trims characters from both sides of the input.
// If the second argument is empty, surrounding spaces are removed.
|
||||
func Trim(str, chars string) string {
|
||||
return LeftTrim(RightTrim(str, chars), chars)
|
||||
}
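A short usage sketch of the trim helpers above (not part of the vendored file); with an empty character set they strip whitespace, otherwise they strip exactly the characters given.

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.Trim("  hello  ", ""))     // "hello" (whitespace trimmed)
	fmt.Println(govalidator.LeftTrim("xxhello", "x"))  // "hello"
	fmt.Println(govalidator.RightTrim("hello!!", "!")) // "hello"
}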
|
||||
|
||||
// WhiteList removes characters that do not appear in the whitelist.
|
||||
func WhiteList(str, chars string) string {
|
||||
pattern := "[^" + chars + "]+"
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return r.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// BlackList removes characters that appear in the blacklist.
|
||||
func BlackList(str, chars string) string {
|
||||
pattern := "[" + chars + "]+"
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return r.ReplaceAllString(str, "")
|
||||
}
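Both helpers interpolate their second argument into a regular-expression character class, so ranges like "0-9" work directly. A minimal sketch (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Keep only digits: everything outside the whitelist class is dropped.
	fmt.Println(govalidator.WhiteList("abc-123", "0-9")) // "123"
	// Drop everything inside the blacklist class.
	fmt.Println(govalidator.BlackList("abc-123", "a-z")) // "-123"
}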
|
||||
|
||||
// StripLow removes characters with a numerical value < 32, as well as 127 (mostly control characters).
// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
|
||||
func StripLow(str string, keepNewLines bool) string {
|
||||
chars := ""
|
||||
if keepNewLines {
|
||||
chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
|
||||
} else {
|
||||
chars = "\x00-\x1F\x7F"
|
||||
}
|
||||
return BlackList(str, chars)
|
||||
}
|
||||
|
||||
// ReplacePattern replaces all matches of the regular expression pattern in the string.
|
||||
func ReplacePattern(str, pattern, replace string) string {
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return r.ReplaceAllString(str, replace)
|
||||
}
|
||||
|
||||
// Escape replaces <, >, & and " with HTML entities.
|
||||
var Escape = html.EscapeString
|
||||
|
||||
func addSegment(inrune, segment []rune) []rune {
|
||||
if len(segment) == 0 {
|
||||
return inrune
|
||||
}
|
||||
if len(inrune) != 0 {
|
||||
inrune = append(inrune, '_')
|
||||
}
|
||||
inrune = append(inrune, segment...)
|
||||
return inrune
|
||||
}
|
||||
|
||||
// UnderscoreToCamelCase converts from underscore separated form to camel case form.
|
||||
// Ex.: my_func => MyFunc
|
||||
func UnderscoreToCamelCase(s string) string {
|
||||
return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
|
||||
}
|
||||
|
||||
// CamelCaseToUnderscore converts from camel case form to underscore separated form.
|
||||
// Ex.: MyFunc => my_func
|
||||
func CamelCaseToUnderscore(str string) string {
|
||||
var output []rune
|
||||
var segment []rune
|
||||
for _, r := range str {
|
||||
|
||||
// not treat number as separate segment
|
||||
if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
|
||||
output = addSegment(output, segment)
|
||||
segment = nil
|
||||
}
|
||||
segment = append(segment, unicode.ToLower(r))
|
||||
}
|
||||
output = addSegment(output, segment)
|
||||
return string(output)
|
||||
}
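The two case converters above are inverses for simple identifiers. A minimal round-trip sketch (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.UnderscoreToCamelCase("my_func_v2")) // "MyFuncV2"
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFuncV2"))   // "my_func_v2"
}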
|
||||
|
||||
// Reverse returns the reversed string.
|
||||
func Reverse(s string) string {
|
||||
r := []rune(s)
|
||||
for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
|
||||
r[i], r[j] = r[j], r[i]
|
||||
}
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// GetLines splits the string by "\n" and returns a slice of lines.
|
||||
func GetLines(s string) []string {
|
||||
return strings.Split(s, "\n")
|
||||
}
|
||||
|
||||
// GetLine returns the specified line of a multiline string.
|
||||
func GetLine(s string, index int) (string, error) {
|
||||
lines := GetLines(s)
|
||||
if index < 0 || index >= len(lines) {
|
||||
return "", errors.New("line index out of bounds")
|
||||
}
|
||||
return lines[index], nil
|
||||
}
|
||||
|
||||
// RemoveTags removes all tags from an HTML string.
|
||||
func RemoveTags(s string) string {
|
||||
return ReplacePattern(s, "<[^>]*>", "")
|
||||
}
|
||||
|
||||
// SafeFileName returns a safe string that can be used in file names.
|
||||
func SafeFileName(str string) string {
|
||||
name := strings.ToLower(str)
|
||||
name = path.Clean(path.Base(name))
|
||||
name = strings.Trim(name, " ")
|
||||
separators, err := regexp.Compile(`[ &_=+:]`)
|
||||
if err == nil {
|
||||
name = separators.ReplaceAllString(name, "-")
|
||||
}
|
||||
legal, err := regexp.Compile(`[^[:alnum:]-.]`)
|
||||
if err == nil {
|
||||
name = legal.ReplaceAllString(name, "")
|
||||
}
|
||||
for strings.Contains(name, "--") {
|
||||
name = strings.Replace(name, "--", "-", -1)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// NormalizeEmail canonicalizes an email address.
// Both the local part and the hostname are lowercased.
// Normalization follows special rules for known providers: currently, Gmail addresses have dots removed from the local part,
// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all @googlemail.com addresses are
// normalized to @gmail.com.
|
||||
func NormalizeEmail(str string) (string, error) {
|
||||
if !IsEmail(str) {
|
||||
return "", fmt.Errorf("%s is not an email", str)
|
||||
}
|
||||
parts := strings.Split(str, "@")
|
||||
parts[0] = strings.ToLower(parts[0])
|
||||
parts[1] = strings.ToLower(parts[1])
|
||||
if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
|
||||
parts[1] = "gmail.com"
|
||||
parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
|
||||
}
|
||||
return strings.Join(parts, "@"), nil
|
||||
}
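A minimal usage sketch of the normalization rules described above (not part of the vendored file); it assumes the input passes the library's IsEmail check.

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	normalized, err := govalidator.NormalizeEmail("Some.One+news@GoogleMail.com")
	if err != nil {
		fmt.Println("not an email:", err)
		return
	}
	fmt.Println(normalized) // someone@gmail.com
}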
|
||||
|
||||
// Truncate shortens a string to approximately the given length without breaking words, appending the ending string.
|
||||
func Truncate(str string, length int, ending string) string {
|
||||
var aftstr, befstr string
|
||||
if len(str) > length {
|
||||
words := strings.Fields(str)
|
||||
before, present := 0, 0
|
||||
for i := range words {
|
||||
befstr = aftstr
|
||||
before = present
|
||||
aftstr = aftstr + words[i] + " "
|
||||
present = len(aftstr)
|
||||
if present > length && i != 0 {
|
||||
if (length - before) < (present - length) {
|
||||
return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
|
||||
}
|
||||
return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return str
|
||||
}
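A short sketch of the word-boundary truncation above (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog"
	// Cut near 20 characters without splitting a word, then append an ellipsis.
	fmt.Println(govalidator.Truncate(s, 20, "...")) // "The quick brown fox..."
}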
|
||||
|
||||
// PadLeft pads the left side of the string if its length is less than the indicated pad length.
|
||||
func PadLeft(str string, padStr string, padLen int) string {
|
||||
return buildPadStr(str, padStr, padLen, true, false)
|
||||
}
|
||||
|
||||
// PadRight pads the right side of the string if its length is less than the indicated pad length.
|
||||
func PadRight(str string, padStr string, padLen int) string {
|
||||
return buildPadStr(str, padStr, padLen, false, true)
|
||||
}
|
||||
|
||||
// PadBoth pads both sides of the string if its length is less than the indicated pad length.
|
||||
func PadBoth(str string, padStr string, padLen int) string {
|
||||
return buildPadStr(str, padStr, padLen, true, true)
|
||||
}
|
||||
|
||||
// buildPadStr pads the string on the left, right, or both sides; note that the padding string can be Unicode
// and more than one character long.
|
||||
func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
|
||||
|
||||
// When the padded length is less than the current string size
|
||||
if padLen < utf8.RuneCountInString(str) {
|
||||
return str
|
||||
}
|
||||
|
||||
padLen -= utf8.RuneCountInString(str)
|
||||
|
||||
targetLen := padLen
|
||||
|
||||
targetLenLeft := targetLen
|
||||
targetLenRight := targetLen
|
||||
if padLeft && padRight {
|
||||
targetLenLeft = padLen / 2
|
||||
targetLenRight = padLen - targetLenLeft
|
||||
}
|
||||
|
||||
strToRepeatLen := utf8.RuneCountInString(padStr)
|
||||
|
||||
repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
|
||||
repeatedString := strings.Repeat(padStr, repeatTimes)
|
||||
|
||||
leftSide := ""
|
||||
if padLeft {
|
||||
leftSide = repeatedString[0:targetLenLeft]
|
||||
}
|
||||
|
||||
rightSide := ""
|
||||
if padRight {
|
||||
rightSide = repeatedString[0:targetLenRight]
|
||||
}
|
||||
|
||||
return leftSide + str + rightSide
|
||||
}
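A minimal sketch of the padding helpers built on buildPadStr (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.PadLeft("7", "0", 3))   // "007"
	fmt.Println(govalidator.PadRight("ab", "-", 5)) // "ab---"
	fmt.Println(govalidator.PadBoth("go", "*", 6))  // "**go**"
}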
|
||||
|
||||
// TruncatingErrorf drops any extra args passed to fmt.Errorf that have no corresponding %s verb in str.
|
||||
func TruncatingErrorf(str string, args ...interface{}) error {
|
||||
n := strings.Count(str, "%s")
|
||||
return fmt.Errorf(str, args[:n]...)
|
||||
}
|
||||
1278
vendor/github.com/asaskevich/govalidator/validator.go
generated
vendored
File diff suppressed because it is too large
Load Diff
15
vendor/github.com/asaskevich/govalidator/wercker.yml
generated
vendored
@@ -1,15 +0,0 @@
|
||||
box: golang
|
||||
build:
|
||||
steps:
|
||||
- setup-go-workspace
|
||||
|
||||
- script:
|
||||
name: go get
|
||||
code: |
|
||||
go version
|
||||
go get -t ./...
|
||||
|
||||
- script:
|
||||
name: go test
|
||||
code: |
|
||||
go test -race ./...
|
||||
62
vendor/github.com/cenkalti/backoff/v4/context.go
generated
vendored
@@ -1,62 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func getContext(b BackOff) context.Context {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb.Context()
|
||||
}
|
||||
if tb, ok := b.(*backOffTries); ok {
|
||||
return getContext(tb.delegate)
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
return b.BackOff.NextBackOff()
|
||||
}
|
||||
}
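A minimal sketch (not part of the vendored file) of wrapping a policy with WithContext so that retries stop once the context is canceled; the import path github.com/cenkalti/backoff/v4 matches this vendored copy.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Wrap an exponential policy so NextBackOff returns Stop once ctx is done.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	err := backoff.Retry(func() error {
		return fmt.Errorf("still failing")
	}, b)
	fmt.Println(err) // context deadline exceeded, once the 2s budget is spent
}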
|
||||
216
vendor/github.com/cenkalti/backoff/v4/exponential.go
generated
vendored
@@ -1,216 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Stop time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
|
||||
type ExponentialBackOffOpts func(*ExponentialBackOff)
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Stop: Stop,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
for _, fn := range opts {
|
||||
fn(b)
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
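A short sketch (not part of the vendored file) of constructing a policy with the functional options defined below; the printed durations are randomized per the formula in the doc comment above.

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMultiplier(2),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(30*time.Second),
	)

	// Print the first few (randomized) wait durations the policy would produce.
	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff())
	}
}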
|
||||
|
||||
// WithInitialInterval sets the initial interval between retries.
|
||||
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.InitialInterval = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
|
||||
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.RandomizationFactor = randomizationFactor
|
||||
}
|
||||
}
|
||||
|
||||
// WithMultiplier sets the multiplier for increasing the interval after each retry.
|
||||
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Multiplier = multiplier
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxInterval sets the maximum interval between retries.
|
||||
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.MaxInterval = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxElapsedTime sets the maximum total time for retries.
|
||||
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.MaxElapsedTime = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryStopDuration sets the duration after which retries should stop.
|
||||
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Stop = duration
|
||||
}
|
||||
}
|
||||
|
||||
// WithClockProvider sets the clock used to measure time.
|
||||
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
|
||||
return func(ebo *ExponentialBackOff) {
|
||||
ebo.Clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset sets the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
elapsed := b.GetElapsedTime()
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||
return b.Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
||||
146
vendor/github.com/cenkalti/backoff/v4/retry.go
generated
vendored
@@ -1,146 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type OperationWithData[T any] func() (T, error)
|
||||
|
||||
// An Operation is executed by Retry() or RetryNotify().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type Operation func() error
|
||||
|
||||
func (o Operation) withEmptyData() OperationWithData[struct{}] {
|
||||
return func() (struct{}, error) {
|
||||
return struct{}{}, o()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is a notify-on-error function. It receives an operation error and
|
||||
// backoff delay if the operation failed (with an error).
|
||||
//
|
||||
// NOTE that if the backoff policy says to stop retrying,
// the notify function isn't called.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// Retry the operation o until it does not return error or BackOff stops.
|
||||
// o is guaranteed to be run at least once.
|
||||
//
|
||||
// If o returns a *PermanentError, the operation is not retried, and the
|
||||
// wrapped error is returned.
|
||||
//
|
||||
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||
// failed operation returns.
|
||||
func Retry(o Operation, b BackOff) error {
|
||||
return RetryNotify(o, b, nil)
|
||||
}
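A minimal sketch (not part of the vendored file) of the Retry contract described above: transient errors are retried on an exponential schedule, while a *PermanentError would end the loop immediately.

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempt := 0
	op := func() error {
		attempt++
		if attempt < 3 {
			return fmt.Errorf("transient failure on attempt %d", attempt)
		}
		// Returning backoff.Permanent(err) here instead would stop the retries immediately.
		return nil
	}

	if err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempt, "attempts")
}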
|
||||
|
||||
// RetryWithData is like Retry but returns data in the response too.
|
||||
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
|
||||
return RetryNotifyWithData(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryNotify calls notify function with the error and wait duration
|
||||
// for each failed attempt before sleep.
|
||||
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||
return RetryNotifyWithTimer(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
|
||||
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
|
||||
// for each failed attempt before sleep.
|
||||
// A default timer that uses system timer is used when nil is passed.
|
||||
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
|
||||
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
|
||||
return err
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
|
||||
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, t)
|
||||
}
|
||||
|
||||
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
var (
|
||||
err error
|
||||
next time.Duration
|
||||
res T
|
||||
)
|
||||
if t == nil {
|
||||
t = &defaultTimer{}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
t.Stop()
|
||||
}()
|
||||
|
||||
ctx := getContext(b)
|
||||
|
||||
b.Reset()
|
||||
for {
|
||||
res, err = operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return res, permanent.Err
|
||||
}
|
||||
|
||||
if next = b.NextBackOff(); next == Stop {
|
||||
if cerr := ctx.Err(); cerr != nil {
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
if notify != nil {
|
||||
notify(err, next)
|
||||
}
|
||||
|
||||
t.Start(next)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return res, ctx.Err()
|
||||
case <-t.C():
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
func (e *PermanentError) Is(target error) bool {
|
||||
_, ok := target.(*PermanentError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
/*
|
||||
WithMaxRetries creates a wrapper around another BackOff, which will
|
||||
return Stop if NextBackOff() has been called too many times since
|
||||
the last time Reset() was called
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
func WithMaxRetries(b BackOff, max uint64) BackOff {
|
||||
return &backOffTries{delegate: b, maxTries: max}
|
||||
}
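A short sketch (not part of the vendored file) of capping an exponential policy with WithMaxRetries so that a failing operation runs the initial attempt plus at most the given number of retries.

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Allow at most 3 retries on top of an exponential policy.
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)

	calls := 0
	err := backoff.Retry(func() error {
		calls++
		return fmt.Errorf("failure %d", calls)
	}, b)
	fmt.Println(err, "after", calls, "calls") // stops once the retry budget is spent
}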
|
||||
|
||||
type backOffTries struct {
|
||||
delegate BackOff
|
||||
maxTries uint64
|
||||
numTries uint64
|
||||
}
|
||||
|
||||
func (b *backOffTries) NextBackOff() time.Duration {
|
||||
if b.maxTries == 0 {
|
||||
return Stop
|
||||
}
|
||||
if b.maxTries > 0 {
|
||||
if b.maxTries <= b.numTries {
|
||||
return Stop
|
||||
}
|
||||
b.numTries++
|
||||
}
|
||||
return b.delegate.NextBackOff()
|
||||
}
|
||||
|
||||
func (b *backOffTries) Reset() {
|
||||
b.numTries = 0
|
||||
b.delegate.Reset()
|
||||
}
|
||||
29
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [5.0.0] - 2024-12-19
|
||||
|
||||
### Added
|
||||
|
||||
- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
|
||||
|
||||
### Changed
|
||||
|
||||
- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
|
||||
- Retry function now accepts a context.Context.
|
||||
- Operation function signature changed to return result (any type) and error.
|
||||
|
||||
### Removed
|
||||
|
||||
- RetryNotify* and RetryWithData functions. Only single Retry function remains.
|
||||
- Optional arguments from ExponentialBackoff constructor.
|
||||
- Clock and Timer interfaces.
|
||||
|
||||
### Fixed
|
||||
|
||||
- The original error is returned from Retry if there's a PermanentError. (#144)
|
||||
- The Retry function respects the wrapped PermanentError. (#140)
|
||||
@@ -1,4 +1,4 @@
|
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
|
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc]
|
||||
|
||||
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||
|
||||
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
|
||||
|
||||
## Usage
|
||||
|
||||
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
|
||||
Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
|
||||
|
||||
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
For most cases, use `Retry` function. See [example_test.go][example] for an example.
|
||||
|
||||
If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
* Please don't send a PR without opening an issue and discussing it first.
|
||||
* If proposed change is not a common use case, I will probably not accept it.
|
||||
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
|
||||
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||
|
||||
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||
|
||||
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
|
||||
[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
|
||||
[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
|
||||
@@ -15,16 +15,16 @@ import "time"
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff. Stop to indicate that no more retries should be made.
|
||||
// backoff.Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
// duration := backoff.NextBackOff()
|
||||
// if duration == backoff.Stop {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
46 vendor/github.com/cenkalti/backoff/v5/error.go generated vendored Normal file
@@ -0,0 +1,46 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns a string representation of the Permanent error.
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// Unwrap returns the wrapped error.
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// RetryAfterError signals that the operation should be retried after the given duration.
|
||||
type RetryAfterError struct {
|
||||
Duration time.Duration
|
||||
}
|
||||
|
||||
// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
|
||||
func RetryAfter(seconds int) error {
|
||||
return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
|
||||
}
|
||||
|
||||
// Error returns a string representation of the RetryAfter error.
|
||||
func (e *RetryAfterError) Error() string {
|
||||
return fmt.Sprintf("retry after %s", e.Duration)
|
||||
}
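A hedged sketch of how an operation might use Permanent and RetryAfter; the HTTP endpoint and the status-code handling are illustrative and not part of the library:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	op := func() (*http.Response, error) {
		resp, err := http.Get("https://example.com/api") // hypothetical endpoint
		if err != nil {
			return nil, err // transient: retried with the normal backoff policy
		}
		switch resp.StatusCode {
		case http.StatusTooManyRequests:
			resp.Body.Close()
			// Ask the retry loop to wait a fixed 5 seconds before the next try.
			return nil, backoff.RetryAfter(5)
		case http.StatusBadRequest:
			resp.Body.Close()
			// Signal that retrying cannot help.
			return nil, backoff.Permanent(fmt.Errorf("bad request"))
		}
		return resp, nil
	}

	resp, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(5))
	if resp != nil {
		resp.Body.Close()
	}
	fmt.Println(err)
}
```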
|
||||
125 vendor/github.com/cenkalti/backoff/v5/exponential.go generated vendored Normal file
@@ -0,0 +1,125 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
|
||||
currentInterval time.Duration
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
return &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
//
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
if b.currentInterval == 0 {
|
||||
b.currentInterval = b.InitialInterval
|
||||
}
|
||||
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
return next
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
//
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
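For illustration, a small hedged sketch of driving ExponentialBackOff directly; the field values chosen here are arbitrary, and the policy can also be handed to Retry via WithBackOff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 200 * time.Millisecond // arbitrary values for the sketch
	b.Multiplier = 2
	b.MaxInterval = 5 * time.Second

	// Each call returns a randomized interval and grows the underlying
	// interval by the multiplier until it is capped at MaxInterval.
	for i := 0; i < 6; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```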
|
||||
139 vendor/github.com/cenkalti/backoff/v5/retry.go generated vendored Normal file
@@ -0,0 +1,139 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultMaxElapsedTime sets a default limit for the total retry duration.
|
||||
const DefaultMaxElapsedTime = 15 * time.Minute
|
||||
|
||||
// Operation is a function that attempts an operation and may be retried.
|
||||
type Operation[T any] func() (T, error)
|
||||
|
||||
// Notify is a function called on operation error with the error and backoff duration.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// retryOptions holds configuration settings for the retry mechanism.
|
||||
type retryOptions struct {
|
||||
BackOff BackOff // Strategy for calculating backoff periods.
|
||||
Timer timer // Timer to manage retry delays.
|
||||
Notify Notify // Optional function to notify on each retry error.
|
||||
MaxTries uint // Maximum number of retry attempts.
|
||||
MaxElapsedTime time.Duration // Maximum total time for all retries.
|
||||
}
|
||||
|
||||
type RetryOption func(*retryOptions)
|
||||
|
||||
// WithBackOff configures a custom backoff strategy.
|
||||
func WithBackOff(b BackOff) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.BackOff = b
|
||||
}
|
||||
}
|
||||
|
||||
// withTimer sets a custom timer for managing delays between retries.
|
||||
func withTimer(t timer) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.Timer = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithNotify sets a notification function to handle retry errors.
|
||||
func WithNotify(n Notify) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.Notify = n
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxTries limits the number of retry attempts.
|
||||
func WithMaxTries(n uint) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.MaxTries = n
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxElapsedTime limits the total duration for retry attempts.
|
||||
func WithMaxElapsedTime(d time.Duration) RetryOption {
|
||||
return func(args *retryOptions) {
|
||||
args.MaxElapsedTime = d
|
||||
}
|
||||
}
|
||||
|
||||
// Retry attempts the operation until success, a permanent error, or backoff completion.
|
||||
// It ensures the operation is executed at least once.
|
||||
//
|
||||
// Returns the operation result or error if retries are exhausted or context is cancelled.
|
||||
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
|
||||
// Initialize default retry options.
|
||||
args := &retryOptions{
|
||||
BackOff: NewExponentialBackOff(),
|
||||
Timer: &defaultTimer{},
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
}
|
||||
|
||||
// Apply user-provided options to the default settings.
|
||||
for _, opt := range opts {
|
||||
opt(args)
|
||||
}
|
||||
|
||||
defer args.Timer.Stop()
|
||||
|
||||
startedAt := time.Now()
|
||||
args.BackOff.Reset()
|
||||
for numTries := uint(1); ; numTries++ {
|
||||
// Execute the operation.
|
||||
res, err := operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Stop retrying if maximum tries exceeded.
|
||||
if args.MaxTries > 0 && numTries >= args.MaxTries {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Handle permanent errors without retrying.
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Stop retrying if context is cancelled.
|
||||
if cerr := context.Cause(ctx); cerr != nil {
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
// Calculate next backoff duration.
|
||||
next := args.BackOff.NextBackOff()
|
||||
if next == Stop {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Reset backoff if RetryAfterError is encountered.
|
||||
var retryAfter *RetryAfterError
|
||||
if errors.As(err, &retryAfter) {
|
||||
next = retryAfter.Duration
|
||||
args.BackOff.Reset()
|
||||
}
|
||||
|
||||
// Stop retrying if maximum elapsed time exceeded.
|
||||
if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Notify on error if a notifier function is provided.
|
||||
if args.Notify != nil {
|
||||
args.Notify(err, next)
|
||||
}
|
||||
|
||||
// Wait for the next backoff period or context cancellation.
|
||||
args.Timer.Start(next)
|
||||
select {
|
||||
case <-args.Timer.C():
|
||||
case <-ctx.Done():
|
||||
return res, context.Cause(ctx)
|
||||
}
|
||||
}
|
||||
}
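From the caller's side, the context handling in the loop above can be sketched as follows (hedged; the always-failing operation is illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	// Cancel the whole retry loop after one second, regardless of backoff state.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	op := func() (struct{}, error) {
		return struct{}{}, errors.New("still failing") // illustrative
	}

	_, err := backoff.Retry(ctx, op)
	// Once the deadline passes, Retry returns the cause of the cancellation.
	fmt.Println(err)
}
```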
|
||||
@@ -1,7 +1,6 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -14,8 +13,7 @@ type Ticker struct {
|
||||
C <-chan time.Time
|
||||
c chan time.Time
|
||||
b BackOff
|
||||
ctx context.Context
|
||||
timer Timer
|
||||
timer timer
|
||||
stop chan struct{}
|
||||
stopOnce sync.Once
|
||||
}
|
||||
@@ -27,22 +25,12 @@ type Ticker struct {
|
||||
// provided backoff policy (notably calling NextBackOff or Reset)
|
||||
// while the ticker is running.
|
||||
func NewTicker(b BackOff) *Ticker {
|
||||
return NewTickerWithTimer(b, &defaultTimer{})
|
||||
}
|
||||
|
||||
// NewTickerWithTimer returns a new Ticker with a custom timer.
|
||||
// A default timer that uses system timer is used when nil is passed.
|
||||
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
|
||||
if timer == nil {
|
||||
timer = &defaultTimer{}
|
||||
}
|
||||
c := make(chan time.Time)
|
||||
t := &Ticker{
|
||||
C: c,
|
||||
c: c,
|
||||
b: b,
|
||||
ctx: getContext(b),
|
||||
timer: timer,
|
||||
timer: &defaultTimer{},
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
t.b.Reset()
|
||||
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
|
||||
case <-t.stop:
|
||||
t.c = nil // Prevent future ticks from being sent to the channel.
|
||||
return
|
||||
case <-t.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@ package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
type Timer interface {
|
||||
type timer interface {
|
||||
Start(duration time.Duration)
|
||||
Stop()
|
||||
C() <-chan time.Time
|
||||
62 vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go generated vendored Normal file
@@ -0,0 +1,62 @@
|
||||
package md2man
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
func fmtListFlags(flags blackfriday.ListType) string {
|
||||
knownFlags := []struct {
|
||||
name string
|
||||
flag blackfriday.ListType
|
||||
}{
|
||||
{"ListTypeOrdered", blackfriday.ListTypeOrdered},
|
||||
{"ListTypeDefinition", blackfriday.ListTypeDefinition},
|
||||
{"ListTypeTerm", blackfriday.ListTypeTerm},
|
||||
{"ListItemContainsBlock", blackfriday.ListItemContainsBlock},
|
||||
{"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList},
|
||||
{"ListItemEndOfList", blackfriday.ListItemEndOfList},
|
||||
}
|
||||
|
||||
var f []string
|
||||
for _, kf := range knownFlags {
|
||||
if flags&kf.flag != 0 {
|
||||
f = append(f, kf.name)
|
||||
flags &^= kf.flag
|
||||
}
|
||||
}
|
||||
if flags != 0 {
|
||||
f = append(f, fmt.Sprintf("Unknown(%#x)", flags))
|
||||
}
|
||||
return strings.Join(f, "|")
|
||||
}
|
||||
|
||||
type debugDecorator struct {
|
||||
blackfriday.Renderer
|
||||
}
|
||||
|
||||
func depth(node *blackfriday.Node) int {
|
||||
d := 0
|
||||
for n := node.Parent; n != nil; n = n.Parent {
|
||||
d++
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
fmt.Fprintf(os.Stderr, "%s%s %v %v\n",
|
||||
strings.Repeat(" ", depth(node)),
|
||||
map[bool]string{true: "+", false: "-"}[entering],
|
||||
node,
|
||||
fmtListFlags(node.ListFlags))
|
||||
var b strings.Builder
|
||||
status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering)
|
||||
if b.Len() > 0 {
|
||||
fmt.Fprintf(os.Stderr, ">> %q\n", b.String())
|
||||
}
|
||||
return status
|
||||
}
|
||||
9 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go generated vendored
@@ -1,16 +1,23 @@
|
||||
package md2man
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
// Render converts a markdown document into a roff formatted document.
|
||||
func Render(doc []byte) []byte {
|
||||
renderer := NewRoffRenderer()
|
||||
var r blackfriday.Renderer = renderer
|
||||
if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v {
|
||||
r = &debugDecorator{Renderer: r}
|
||||
}
|
||||
|
||||
return blackfriday.Run(doc,
|
||||
[]blackfriday.Option{
|
||||
blackfriday.WithRenderer(renderer),
|
||||
blackfriday.WithRenderer(r),
|
||||
blackfriday.WithExtensions(renderer.GetExtensions()),
|
||||
}...)
|
||||
}
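A minimal, hedged sketch of calling this entry point; the markdown input is made up, and the MD2MAN_DEBUG behaviour follows the code above:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	md := []byte("# NAME\n\nmytool - does something useful\n")
	// With MD2MAN_DEBUG=1 set in the environment, Render wraps the roff
	// renderer in the debug decorator above and traces every node to stderr.
	roff := md2man.Render(md)
	fmt.Println(string(roff))
}
```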
|
||||
|
||||
97 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go generated vendored
@@ -14,10 +14,8 @@ import (
|
||||
// roffRenderer implements the blackfriday.Renderer interface for creating
|
||||
// roff format (manpages) from markdown text
|
||||
type roffRenderer struct {
|
||||
extensions blackfriday.Extensions
|
||||
listCounters []int
|
||||
firstHeader bool
|
||||
firstDD bool
|
||||
listDepth int
|
||||
}
|
||||
|
||||
@@ -43,7 +41,7 @@ const (
|
||||
quoteTag = "\n.PP\n.RS\n"
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
listCloseTag = "\n.RE\n"
|
||||
listCloseTag = ".RE\n"
|
||||
dtTag = "\n.TP\n"
|
||||
dd2Tag = "\n"
|
||||
tableStart = "\n.TS\nallbox;\n"
|
||||
@@ -56,23 +54,18 @@ const (
|
||||
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||
// from markdown
|
||||
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||
var extensions blackfriday.Extensions
|
||||
|
||||
extensions |= blackfriday.NoIntraEmphasis
|
||||
extensions |= blackfriday.Tables
|
||||
extensions |= blackfriday.FencedCode
|
||||
extensions |= blackfriday.SpaceHeadings
|
||||
extensions |= blackfriday.Footnotes
|
||||
extensions |= blackfriday.Titleblock
|
||||
extensions |= blackfriday.DefinitionLists
|
||||
return &roffRenderer{
|
||||
extensions: extensions,
|
||||
}
|
||||
return &roffRenderer{}
|
||||
}
|
||||
|
||||
// GetExtensions returns the list of extensions used by this renderer implementation
|
||||
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||
return r.extensions
|
||||
func (*roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||
return blackfriday.NoIntraEmphasis |
|
||||
blackfriday.Tables |
|
||||
blackfriday.FencedCode |
|
||||
blackfriday.SpaceHeadings |
|
||||
blackfriday.Footnotes |
|
||||
blackfriday.Titleblock |
|
||||
blackfriday.DefinitionLists
|
||||
}
|
||||
|
||||
// RenderHeader handles outputting the header at document start
|
||||
@@ -103,7 +96,23 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
// Special case: format the NAME section as required for proper whatis parsing.
|
||||
// Refer to the lexgrog(1) and groff_man(7) manual pages for details.
|
||||
if node.Parent != nil &&
|
||||
node.Parent.Type == blackfriday.Paragraph &&
|
||||
node.Parent.Prev != nil &&
|
||||
node.Parent.Prev.Type == blackfriday.Heading &&
|
||||
node.Parent.Prev.FirstChild != nil &&
|
||||
bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
|
||||
before, after, found := bytesCut(node.Literal, []byte(" - "))
|
||||
escapeSpecialChars(w, before)
|
||||
if found {
|
||||
out(w, ` \- `)
|
||||
escapeSpecialChars(w, after)
|
||||
}
|
||||
} else {
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
}
|
||||
case blackfriday.Softbreak:
|
||||
out(w, crTag)
|
||||
case blackfriday.Hardbreak:
|
||||
@@ -141,14 +150,25 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
case blackfriday.Document:
|
||||
break
|
||||
case blackfriday.Paragraph:
|
||||
// roff .PP markers break lists
|
||||
if r.listDepth > 0 {
|
||||
return blackfriday.GoToNext
|
||||
}
|
||||
if entering {
|
||||
out(w, paraTag)
|
||||
if r.listDepth > 0 {
|
||||
// roff .PP markers break lists
|
||||
if node.Prev != nil { // continued paragraph
|
||||
if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 {
|
||||
out(w, ".IP\n")
|
||||
} else {
|
||||
out(w, crTag)
|
||||
}
|
||||
}
|
||||
} else if node.Prev != nil && node.Prev.Type == blackfriday.Heading {
|
||||
out(w, crTag)
|
||||
} else {
|
||||
out(w, paraTag)
|
||||
}
|
||||
} else {
|
||||
out(w, crTag)
|
||||
if node.Next == nil || node.Next.Type != blackfriday.List {
|
||||
out(w, crTag)
|
||||
}
|
||||
}
|
||||
case blackfriday.BlockQuote:
|
||||
if entering {
|
||||
@@ -211,6 +231,10 @@ func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, enteri
|
||||
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
openTag := listTag
|
||||
closeTag := listCloseTag
|
||||
if (entering && r.listDepth == 0) || (!entering && r.listDepth == 1) {
|
||||
openTag = crTag
|
||||
closeTag = ""
|
||||
}
|
||||
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// tags for definition lists handled within Item node
|
||||
openTag = ""
|
||||
@@ -239,23 +263,25 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
|
||||
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||
// DT (definition term): line just before DD (see below).
|
||||
out(w, dtTag)
|
||||
r.firstDD = true
|
||||
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// DD (definition description): line that starts with ": ".
|
||||
//
|
||||
// We have to distinguish between the first DD and the
|
||||
// subsequent ones, as there should be no vertical
|
||||
// whitespace between the DT and the first DD.
|
||||
if r.firstDD {
|
||||
r.firstDD = false
|
||||
} else {
|
||||
out(w, dd2Tag)
|
||||
if node.Prev != nil && node.Prev.ListFlags&(blackfriday.ListTypeTerm|blackfriday.ListTypeDefinition) == blackfriday.ListTypeDefinition {
|
||||
if node.Prev.Type == blackfriday.Item &&
|
||||
node.Prev.LastChild != nil &&
|
||||
node.Prev.LastChild.Type == blackfriday.List &&
|
||||
node.Prev.LastChild.ListFlags&blackfriday.ListTypeDefinition == 0 {
|
||||
out(w, ".IP\n")
|
||||
} else {
|
||||
out(w, dd2Tag)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out(w, ".IP \\(bu 2\n")
|
||||
}
|
||||
} else {
|
||||
out(w, "\n")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -380,3 +406,12 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
|
||||
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||
}
|
||||
}
|
||||
|
||||
// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
|
||||
// and older. We can remove this once we drop support for go1.17 and older.
|
||||
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
|
||||
if i := bytes.Index(s, sep); i >= 0 {
|
||||
return s[:i], s[i+len(sep):], true
|
||||
}
|
||||
return s, nil, false
|
||||
}
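To make the NAME special case above concrete, here is a hedged sketch using the standard library's bytes.Cut, which bytesCut mirrors; the input line is made up:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// A typical first line under a "# NAME" heading in man page markdown.
	line := []byte("mytool - does something useful")

	before, after, found := bytes.Cut(line, []byte(" - "))
	if found {
		// The renderer escapes both halves and joins them with ` \- `,
		// the form that lexgrog/whatis expect in the NAME section.
		fmt.Printf("%s \\- %s\n", before, after)
	}
}
```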
|
||||
|
||||
21 vendor/github.com/emicklei/go-restful/v3/CHANGES.md generated vendored
@@ -1,5 +1,26 @@
# Change history of go-restful

## [v3.12.2] - 2025-02-21

- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)

## [v3.12.1] - 2024-05-28

- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)

## [v3.12.0] - 2024-03-11

- add Flush method #529 (#538)
- fix: Improper handling of empty POST requests (#543)

## [v3.11.3] - 2024-01-09

- better not have 2 tags on one commit

## [v3.11.1, v3.11.2] - 2024-01-09

- fix by restoring custom JSON handler functions (Mike Beaumont #540)

## [v3.11.0] - 2023-08-19

- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
6 vendor/github.com/emicklei/go-restful/v3/README.md generated vendored
@@ -2,9 +2,8 @@ go-restful
|
||||
==========
|
||||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[](https://travis-ci.org/emicklei/go-restful)
|
||||
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
|
||||
[](https://codecov.io/gh/emicklei/go-restful)
|
||||
|
||||
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
|
||||
@@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package.
|
||||
- Trace logging
|
||||
- Compression
|
||||
- Encoders for other serializers
|
||||
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
|
||||
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
|
||||
|
||||
## Resources
|
||||
|
||||
|
||||
10 vendor/github.com/emicklei/go-restful/v3/compress.go generated vendored
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
|
||||
func (c *CompressingResponseWriter) Flush() {
|
||||
flusher, ok := c.writer.(http.Flusher)
|
||||
if !ok {
|
||||
// writer doesn't support http.Flusher interface
|
||||
return
|
||||
}
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
// Close the underlying compressor
|
||||
func (c *CompressingResponseWriter) Close() error {
|
||||
if c.isCompressorClosed() {
|
||||
|
||||
48 vendor/github.com/emicklei/go-restful/v3/curly.go generated vendored
@@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute(
|
||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
|
||||
for _, eachRoute := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(candidates)
|
||||
@@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if routeHasCustomVerb && hasCustomVerb(routeToken){
|
||||
if routeHasCustomVerb && hasCustomVerb(routeToken) {
|
||||
if !isMatchCustomVerb(routeToken, requestToken) {
|
||||
return false, 0, 0
|
||||
}
|
||||
@@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
var bestWs *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
for _, eachWS := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
bestWs = eachWS
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
return bestWs
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) {
|
||||
if len(routeTokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
for i := 0; i < len(routeTokens); i++ {
|
||||
eachRequestToken := requestTokens[i]
|
||||
eachRouteToken := routeTokens[i]
|
||||
if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
if len(eachRequestToken) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
score++
|
||||
|
||||
if colon := strings.Index(eachRouteToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken)
|
||||
if matchesToken {
|
||||
score++ // extra score for regex match
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
if eachRequestToken != eachRouteToken {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
score += (len(routeTokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
|
||||
7 vendor/github.com/emicklei/go-restful/v3/entity_accessors.go generated vendored
@@ -5,11 +5,18 @@ package restful
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
|
||||
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
|
||||
type EntityReaderWriter interface {
|
||||
// Read a serialized version of the value from the request.
|
||||
|
||||
11 vendor/github.com/emicklei/go-restful/v3/json.go generated vendored
@@ -1,11 +0,0 @@
|
||||
// +build !jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
12 vendor/github.com/emicklei/go-restful/v3/jsoniter.go generated vendored
@@ -1,12 +0,0 @@
|
||||
// +build jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "github.com/json-iterator/go"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|
||||
19 vendor/github.com/emicklei/go-restful/v3/jsr311.go generated vendored
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
|
||||
return params
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
|
||||
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
|
||||
candidates := make([]*Route, 0, 8)
|
||||
for i, each := range routes {
|
||||
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
||||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
|
||||
}
|
||||
if httpRequest.ContentLength > 0 {
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
|
||||
// accept
|
||||
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
||||
for _, candidate := range previous {
|
||||
available = append(available, candidate.Produces...)
|
||||
}
|
||||
// if POST,PUT,PATCH without body
|
||||
method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
|
||||
if (method == http.MethodPost ||
|
||||
method == http.MethodPut ||
|
||||
method == http.MethodPatch) && length == "" {
|
||||
return nil, NewError(
|
||||
http.StatusUnsupportedMediaType,
|
||||
fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
}
|
||||
return nil, NewError(
|
||||
http.StatusNotAcceptable,
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
|
||||
}
|
||||
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
|
||||
return candidates[0], nil
|
||||
|
||||
2 vendor/github.com/emicklei/go-restful/v3/route.go generated vendored
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
|
||||
}
|
||||
|
||||
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
|
||||
// If the route does not specify Consumes then return true (*/*).
|
||||
// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
|
||||
func (r Route) matchesContentType(mimeTypes string) bool {
|
||||
|
||||
if len(r.Consumes) == 0 {
|
||||
|
||||
7 vendor/github.com/fsnotify/fsnotify/.cirrus.yml generated vendored
@@ -1,7 +1,7 @@
|
||||
freebsd_task:
|
||||
name: 'FreeBSD'
|
||||
freebsd_instance:
|
||||
image_family: freebsd-13-2
|
||||
image_family: freebsd-14-2
|
||||
install_script:
|
||||
- pkg update -f
|
||||
- pkg install -y go
|
||||
@@ -9,5 +9,6 @@ freebsd_task:
|
||||
# run tests as user "cirrus" instead of root
|
||||
- pw useradd cirrus -m
|
||||
- chown -R cirrus:cirrus .
|
||||
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||
- FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
|
||||
|
||||
1 vendor/github.com/fsnotify/fsnotify/.gitattributes generated vendored
@@ -1 +0,0 @@
|
||||
go.sum linguist-generated
|
||||
3 vendor/github.com/fsnotify/fsnotify/.gitignore generated vendored
@@ -5,3 +5,6 @@
|
||||
# Output of go build ./cmd/fsnotify
|
||||
/fsnotify
|
||||
/fsnotify.exe
|
||||
|
||||
/test/kqueue
|
||||
/test/a.out
|
||||
|
||||
67 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md generated vendored
@@ -1,8 +1,69 @@
|
||||
# Changelog
|
||||
|
||||
Unreleased
|
||||
----------
|
||||
Nothing yet.
|
||||
1.9.0 2025-04-04
|
||||
----------------
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- all: make BufferedWatcher buffered again ([#657])
|
||||
|
||||
- inotify: fix race when adding/removing watches while a watched path is being
|
||||
deleted ([#678], [#686])
|
||||
|
||||
- inotify: don't send empty event if a watched path is unmounted ([#655])
|
||||
|
||||
- inotify: don't register duplicate watches when watching both a symlink and its
|
||||
target; previously that would get "half-added" and removing the second would
|
||||
panic ([#679])
|
||||
|
||||
- kqueue: fix watching relative symlinks ([#681])
|
||||
|
||||
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
|
||||
kqueue ([#682])
|
||||
|
||||
- illumos: don't send error if changed file is deleted while processing the
|
||||
event ([#678])
|
||||
|
||||
|
||||
[#657]: https://github.com/fsnotify/fsnotify/pull/657
|
||||
[#678]: https://github.com/fsnotify/fsnotify/pull/678
|
||||
[#686]: https://github.com/fsnotify/fsnotify/pull/686
|
||||
[#655]: https://github.com/fsnotify/fsnotify/pull/655
|
||||
[#681]: https://github.com/fsnotify/fsnotify/pull/681
|
||||
[#679]: https://github.com/fsnotify/fsnotify/pull/679
|
||||
[#682]: https://github.com/fsnotify/fsnotify/pull/682
|
||||
|
||||
1.8.0 2024-10-31
|
||||
----------------
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||
|
||||
- kqueue: ignore events with Ident=0 ([#590])
|
||||
|
||||
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||
|
||||
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||
|
||||
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||
|
||||
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||
|
||||
- fen: allow watching subdirectories of watched directories ([#621])
|
||||
|
||||
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
|
||||
121 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md generated vendored
@@ -1,7 +1,7 @@
|
||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||
merge PRs in a reasonable timeframe, but please be aware that:
|
||||
|
||||
- To avoid "wasted" work, please discus changes on the issue tracker first. You
|
||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
||||
can just send PRs, but they may end up being rejected for one reason or the
|
||||
other.
|
||||
|
||||
@@ -20,6 +20,125 @@ platforms. Testing different platforms locally can be done with something like
|
||||
|
||||
Use the `-short` flag to make the "stress test" run faster.
|
||||
|
||||
Writing new tests
|
||||
-----------------
|
||||
Scripts in the testdata directory allow creating test cases in a "shell-like"
|
||||
syntax. The basic format is:
|
||||
|
||||
script
|
||||
|
||||
Output:
|
||||
desired output
|
||||
|
||||
For example:
|
||||
|
||||
# Create a new empty file with some data.
|
||||
watch /
|
||||
echo data >/file
|
||||
|
||||
Output:
|
||||
create /file
|
||||
write /file
|
||||
|
||||
Just create a new file to add a new test; select which tests to run with
|
||||
`-run TestScript/[path]`.
|
||||
|
||||
script
|
||||
------
|
||||
The script is a "shell-like" script:
|
||||
|
||||
cmd arg arg
|
||||
|
||||
Comments are supported with `#`:
|
||||
|
||||
# Comment
|
||||
cmd arg arg # Comment
|
||||
|
||||
All operations are done in a temp directory; a path like "/foo" is rewritten to
|
||||
"/tmp/TestFoo/foo".
|
||||
|
||||
Arguments can be quoted with `"` or `'`; there are no escapes and they're
|
||||
functionally identical right now, but this may change in the future, so best to
|
||||
assume shell-like rules.
|
||||
|
||||
touch "/file with spaces"
|
||||
|
||||
End-of-line escapes with `\` are not supported.
|
||||
|
||||
### Supported commands
|
||||
|
||||
watch path [ops] # Watch the path, reporting events for it. Nothing is
|
||||
# watched by default. Optionally a list of ops can be
|
||||
# given, as with AddWith(path, WithOps(...)).
|
||||
unwatch path # Stop watching the path.
|
||||
watchlist n # Assert watchlist length.
|
||||
|
||||
stop # Stop running the script; for debugging.
|
||||
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
|
||||
parallel by default, so -parallel=1 is probably a good
|
||||
idea).
|
||||
print [any strings] # Print text to stdout; for debugging.
|
||||
|
||||
touch path
|
||||
mkdir [-p] dir
|
||||
ln -s target link # Only ln -s supported.
|
||||
mkfifo path
|
||||
mknod dev path
|
||||
mv src dst
|
||||
rm [-r] path
|
||||
chmod mode path # Octal only
|
||||
sleep time-in-ms
|
||||
|
||||
cat path # Read path (does nothing with the data; just reads it).
|
||||
echo str >>path # Append "str" to "path".
|
||||
echo str >path # Truncate "path" and write "str".
|
||||
|
||||
require reason # Skip the test if "reason" is true; "skip" and
|
||||
skip reason # "require" behave identical; it supports both for
|
||||
# readability. Possible reasons are:
|
||||
#
|
||||
# always Always skip this test.
|
||||
# symlink Symlinks are supported (requires admin
|
||||
# permissions on Windows).
|
||||
# mkfifo Platform doesn't support FIFO named sockets.
|
||||
# mknod Platform doesn't support device nodes.
|
||||
|
||||
|
||||
output
|
||||
------
|
||||
After `Output:` the desired output is given; this is indented by convention, but
|
||||
that's not required.
|
||||
|
||||
The format of that is:
|
||||
|
||||
# Comment
|
||||
event path # Comment
|
||||
|
||||
system:
|
||||
event path
|
||||
system2:
|
||||
event path
|
||||
|
||||
Every event is one line, and any whitespace between the event and path are
|
||||
ignored. The path can optionally be surrounded in ". Anything after a "#" is
|
||||
ignored.
|
||||
|
||||
Platform-specific tests can be added after GOOS; for example:
|
||||
|
||||
watch /
|
||||
touch /file
|
||||
|
||||
Output:
|
||||
# Tested if nothing else matches
|
||||
create /file
|
||||
|
||||
# Windows-specific test.
|
||||
windows:
|
||||
write /file
|
||||
|
||||
You can specify multiple platforms with a comma (e.g. "windows, linux:").
|
||||
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
|
||||
|
||||
|
||||
[goon]: https://github.com/arp242/goon
|
||||
[Vagrant]: https://www.vagrantup.com/
|
||||
|
||||
2 vendor/github.com/fsnotify/fsnotify/README.md generated vendored
@@ -15,7 +15,6 @@ Platform support:
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FEN | illumos | Supported |
|
||||
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
|
||||
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
|
||||
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
|
||||
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
@@ -25,7 +24,6 @@ untested.
|
||||
|
||||
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
|
||||
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
|
||||
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
|
||||
|
||||
Usage
|
||||
-----
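A minimal, hedged usage sketch for the watcher API described in this README; the watched path is illustrative:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a single directory (non-recursive); "/tmp" is just an example.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```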
|
||||
|
||||
401 vendor/github.com/fsnotify/fsnotify/backend_fen.go generated vendored
@@ -1,162 +1,44 @@
|
||||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||
//
|
||||
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all times, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
type fen struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
dirs map[string]struct{} // Explicitly watched directories
|
||||
watches map[string]struct{} // Explicitly watched non-directories
|
||||
dirs map[string]Op // Explicitly watched directories
|
||||
watches map[string]Op // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(0)
|
||||
}
|
||||
var defaultBufferSize = 0
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
w := &Watcher{
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
dirs: make(map[string]struct{}),
|
||||
watches: make(map[string]struct{}),
|
||||
done: make(chan struct{}),
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
w := &fen{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
dirs: make(map[string]Op),
|
||||
watches: make(map[string]Op),
|
||||
}
|
||||
|
||||
var err error
|
||||
@@ -169,104 +51,28 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// sendEvent attempts to send an event to the user, returning true if the event
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
|
||||
select {
|
||||
case w.Events <- Event{Name: name, Op: op}:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// sendError attempts to send an error to the user, returning true if the error
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *Watcher) sendError(err error) (sent bool) {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
// Take the lock used by associateFile to prevent lingering events from
|
||||
// being processed after the close
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.isClosed() {
|
||||
func (w *fen) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if w.port.PathIsWatched(name) {
|
||||
return nil
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
_ = getOptions(opts...)
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
@@ -283,7 +89,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = struct{}{}
|
||||
w.dirs[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
@@ -294,26 +100,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = struct{}{}
|
||||
w.watches[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
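Since AddWith and WithBufferSize are the exported entry points described in the comment above, a short hypothetical caller-side sketch may help (not taken from this diff; the path is a made-up example):

package watchexample

import "github.com/fsnotify/fsnotify"

// addBusyDir doubles the 64K ReadDirectoryChangesW buffer for one busy
// directory. Per the doc comment above, the option only matters on the
// Windows backend and is a no-op elsewhere; the path is hypothetical.
func addBusyDir(w *fsnotify.Watcher) error {
	return w.AddWith(`C:\data\logs`, fsnotify.WithBufferSize(128*1024))
}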
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
func (w *fen) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
@@ -346,7 +148,7 @@ func (w *Watcher) Remove(name string) error {
|
||||
}
|
||||
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *Watcher) readEvents() {
|
||||
func (w *fen) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
@@ -367,7 +169,7 @@ func (w *Watcher) readEvents() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(err) {
|
||||
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -382,17 +184,19 @@ func (w *Watcher) readEvents() {
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(pevent.Path, pevent.Events)
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -418,7 +222,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and
// when the event was returned)
|
||||
func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
@@ -433,13 +237,13 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(path, Remove) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(path, Rename) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Rename}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
@@ -453,7 +257,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
@@ -487,7 +291,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
// get here, the subdirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
@@ -502,7 +306,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
@@ -510,18 +314,12 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() {
|
||||
if watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(path, Write) {
|
||||
return nil
|
||||
}
|
||||
if fmode.IsDir() && watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(path, Write) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Write}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -529,7 +327,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(path, Chmod) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -538,17 +336,27 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
return w.associateFile(path, stat, isWatched)
|
||||
err := w.associateFile(path, stat, isWatched)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// Path may have been removed since the stat.
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) updateDirectory(path string) error {
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen,
|
||||
// as everything else should still be watched.
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen, as
|
||||
// everything else should still be watched.
|
||||
func (w *fen) updateDirectory(path string) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
// Directory no longer exists: probably just deleted since we got the
|
||||
// event.
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -563,19 +371,22 @@ func (w *Watcher) updateDirectory(path string) error {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// File may have disappeared between getting the dir listing and
|
||||
// adding the port: that's okay to ignore.
|
||||
continue
|
||||
}
|
||||
if !w.sendEvent(path, Create) {
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
@@ -593,34 +404,42 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && err != unix.ENOENT {
|
||||
return err
|
||||
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
}
|
||||
// FILE_NOFOLLOW means we watch symlinks themselves rather than their
|
||||
// targets.
|
||||
events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
|
||||
if follow {
|
||||
// We *DO* follow symlinks for explicitly watched entries.
|
||||
events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
|
||||
|
||||
var events int
|
||||
if !follow {
|
||||
// Watch symlinks themselves rather than their targets unless this entry
|
||||
// is explicitly watched.
|
||||
events |= unix.FILE_NOFOLLOW
|
||||
}
|
||||
return w.port.AssociatePath(path, stat,
|
||||
events,
|
||||
stat.Mode())
|
||||
if true { // TODO: implement withOps()
|
||||
events |= unix.FILE_MODIFIED
|
||||
}
|
||||
if true {
|
||||
events |= unix.FILE_ATTRIB
|
||||
}
|
||||
err := w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
return w.port.DissociatePath(path)
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
func (w *fen) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
@@ -638,3 +457,11 @@ func (w *Watcher) WatchList() []string {
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
func (w *fen) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
735
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
735
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
@@ -1,8 +1,4 @@
|
||||
//go:build linux && !appengine
|
||||
// +build linux,!appengine
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -10,127 +6,21 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
type inotify struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
@@ -138,21 +28,41 @@ type Watcher struct {
|
||||
fd int
|
||||
inotifyFile *os.File
|
||||
watches *watches
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
closeMu sync.Mutex
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
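The comment on the cookies field above describes a tiny wrap-around array used as an allocation-free LRU for matching MOVED_FROM/MOVED_TO rename cookies. A standalone sketch of that idea (illustrative names only, not the vendored types):

package cookiecache

// entry pairs an inotify rename cookie with the path seen in MOVED_FROM.
type entry struct {
	cookie uint32
	path   string
}

// cache is a fixed-size ring: new entries overwrite the oldest slot, so it
// never allocates and never grows, mirroring the [10]koekje field above.
type cache struct {
	entries [10]entry
	next    uint8
}

// remember records the path for a MOVED_FROM cookie.
func (c *cache) remember(cookie uint32, path string) {
	c.entries[c.next] = entry{cookie: cookie, path: path}
	c.next = (c.next + 1) % uint8(len(c.entries))
}

// lookup returns the remembered path for a MOVED_TO cookie, if still cached.
func (c *cache) lookup(cookie uint32) (string, bool) {
	if cookie == 0 {
		return "", false
	}
	for _, e := range c.entries {
		if e.cookie == cookie {
			return e.path, true
		}
	}
	return "", false
}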
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
@@ -163,57 +73,43 @@ func newWatches() *watches {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) len() int {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return len(w.wd)
|
||||
}
|
||||
|
||||
func (w *watches) add(ww *watch) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.wd[ww.wd] = ww
|
||||
w.path[ww.path] = ww.wd
|
||||
}
|
||||
|
||||
func (w *watches) remove(wd uint32) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
delete(w.path, w.wd[wd].path)
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
|
||||
func (w *watches) removePath(path string) (uint32, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
|
||||
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
|
||||
func (w *watches) len() int { return len(w.wd) }
|
||||
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
|
||||
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
|
||||
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return 0, false
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
return wd, true
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[w.path[path]]
|
||||
}
|
||||
|
||||
func (w *watches) byWd(wd uint32) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[wd]
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if strings.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
@@ -236,20 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(0)
|
||||
}
|
||||
var defaultBufferSize = 0
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
@@ -257,13 +142,13 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
w := &inotify{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: newWatches(),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
@@ -271,44 +156,10 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
return w, nil
|
||||
}
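NewBufferedWatcher is documented above as being for bursty workloads where the kernel buffers can't be increased; a hedged usage sketch follows (not from this diff; the buffer size and path are arbitrary examples):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Buffer up to 4096 events in userspace; an unbuffered watcher is usually
	// the better default, per the comment above.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/var/log"); err != nil {
		log.Fatal(err)
	}
	for ev := range w.Events {
		log.Println(ev)
	}
}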
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendEvent(e Event) bool {
|
||||
select {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.closeMu.Lock()
|
||||
if w.isClosed() {
|
||||
w.closeMu.Unlock()
|
||||
func (w *inotify) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
w.closeMu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
@@ -317,84 +168,114 @@ func (w *Watcher) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
<-w.doneResp // Wait for readEvents() to finish.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
_ = getOptions(opts...)
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
add := func(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a Create event when adding new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
return add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if e, ok := w.watches.wd[uint32(wd)]; ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: name,
|
||||
flags: flags,
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -404,87 +285,80 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
})
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
func (w *Watcher) remove(name string) error {
|
||||
wd, ok := w.watches.removePath(name)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
func (w *inotify) remove(name string) error {
|
||||
wds, err := w.watches.removePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
success, errno := unix.InotifyRmWatch(w.fd, wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case;
|
||||
// The only two possible errors are:
|
||||
//
|
||||
// - EBADF, which happens when w.fd is not a valid file descriptor
|
||||
// of any kind.
|
||||
// - EINVAL, which is when fd is not an inotify descriptor or wd
|
||||
// is not a valid watch descriptor. Watch descriptors are
|
||||
// invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they
|
||||
// are watching is deleted.
|
||||
return errno
|
||||
for _, wd := range wds {
|
||||
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||
if err != nil {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every
|
||||
// case; the only two possible errors are:
|
||||
//
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||
// any kind.
|
||||
//
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||
// when they are removed explicitly or implicitly; explicitly by
|
||||
// inotify_rm_watch, implicitly when the file they are watching is
|
||||
// deleted.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
func (w *inotify) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
w.watches.mu.RLock()
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
w.watches.mu.RUnlock()
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
func (w *inotify) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
errno error // Syscall errno
|
||||
)
|
||||
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
switch {
|
||||
case errors.Unwrap(err) == os.ErrClosed:
|
||||
return
|
||||
case err != nil:
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
@@ -492,13 +366,9 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
err := errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
if n == 0 {
|
||||
err = io.EOF // If EOF is received. This should really never happen.
|
||||
} else if n < 0 {
|
||||
err = errno // If an error occurred while reading.
|
||||
} else {
|
||||
err = errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
@@ -506,74 +376,146 @@ func (w *Watcher) readEvents() {
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know how many events we just read into the buffer. While the
// offset points to at least one whole event.
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
var (
|
||||
// Point "raw" to the event in the buffer
|
||||
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
mask = uint32(raw.Mask)
|
||||
nameLen = uint32(raw.Len)
|
||||
)
|
||||
// Point to the event in the buffer.
|
||||
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
watch := w.watches.byWd(uint32(raw.Wd))
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch.wd)
|
||||
ev, ok := w.handleEvent(inEvent, &buf, offset)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// We can't really update the state when a watched path is moved;
|
||||
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
|
||||
// the watch.
|
||||
if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var name string
|
||||
if watch != nil {
|
||||
name = watch.path
|
||||
}
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := w.newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if mask&unix.IN_IGNORED == 0 {
|
||||
if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
if !w.sendEvent(ev) {
|
||||
return
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
offset += unix.SizeofInotifyEvent + inEvent.Len
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
/// If the event happened to the watched directory or the watched file, the
|
||||
/// kernel doesn't append the filename to the event, but we would like to
|
||||
/// always fill the "Name" field with a valid filename. We retrieve the
|
||||
/// path of the watch from the "paths" map.
|
||||
///
|
||||
/// Can be nil if Remove() was called in another goroutine for this path
|
||||
/// in between reading the events from the kernel and reading the internal
|
||||
/// state. Not much we can do about it, so just skip. See #616.
|
||||
watch := w.watches.byWd(uint32(inEvent.Wd))
|
||||
if watch == nil {
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
var (
|
||||
name = watch.path
|
||||
nameLen = uint32(inEvent.Len)
|
||||
)
|
||||
if nameLen > 0 {
|
||||
/// Point "bytes" at the first byte of the filename
|
||||
bb := *buf
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
|
||||
}
|
||||
|
||||
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
|
||||
w.watches.remove(watch)
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch)
|
||||
}
|
||||
|
||||
// We can't really update the state when a watched path is moved; only
|
||||
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
|
||||
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
if watch.recurse { // Do nothing
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip if we're watching both this path and the parent; the parent will
|
||||
/// already send a delete so no need to do it twice.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
|
||||
_, ok := w.watches.path[filepath.Dir(watch.path)]
|
||||
if ok {
|
||||
return Event{}, true
|
||||
}
|
||||
}
|
||||
|
||||
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
|
||||
// Need to update watch path for recurse.
|
||||
if watch.recurse {
|
||||
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||
/// New directory created: set up watch on it.
|
||||
if isDir && ev.Has(Create) {
|
||||
err := w.register(ev.Name, watch.flags, true)
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
|
||||
// This was a directory rename, so we need to update all the
|
||||
// children.
|
||||
//
|
||||
// TODO: this is of course pretty slow; we should use a better data
|
||||
// structure for storing all of this, e.g. store children in the
|
||||
// watch. I have some code for this in my kqueue refactor we can use
|
||||
// in the future. For now I'm okay with this as it's not publicly
|
||||
// available. Correctness first, performance second.
|
||||
if ev.renamedFrom != "" {
|
||||
for k, ww := range w.watches.wd {
|
||||
if k == watch.wd || ww.path == ev.Name {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||
w.watches.wd[k] = ww
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ev, true
|
||||
}
|
||||
|
||||
func (w *inotify) isRecursive(path string) bool {
|
||||
ww := w.watches.byPath(path)
|
||||
if ww == nil { // path could be a file, so also check the Dir.
|
||||
ww = w.watches.byPath(filepath.Dir(path))
|
||||
}
|
||||
return ww != nil && ww.recurse
|
||||
}
|
||||
|
||||
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
@@ -584,11 +526,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||
e.Op |= xUnportableOpen
|
||||
}
|
||||
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||
e.Op |= xUnportableRead
|
||||
}
|
||||
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||
e.Op |= xUnportableCloseWrite
|
||||
}
|
||||
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||
e.Op |= xUnportableCloseRead
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
|
||||
if cookie != 0 {
|
||||
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
w.cookiesMu.Lock()
|
||||
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||
w.cookieIndex++
|
||||
if w.cookieIndex > 9 {
|
||||
w.cookieIndex = 0
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
w.cookiesMu.Lock()
|
||||
var prev string
|
||||
for _, c := range w.cookies {
|
||||
if c.cookie == cookie {
|
||||
prev = c.path
|
||||
break
|
||||
}
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
e.renamedFrom = prev
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (w *inotify) xSupports(op Op) bool {
|
||||
return true // Supports everything.
|
||||
}
|
||||
|
||||
func (w *inotify) state() {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
for wd, ww := range w.watches.wd {
|
||||
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||
}
|
||||
}
|
||||
|
||||
791
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
791
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
File diff suppressed because it is too large
203
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
203
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
@@ -1,205 +1,22 @@
|
||||
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
|
||||
// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "errors"
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
type other struct {
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return nil, errors.New("fsnotify not supported on the current platform")
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error { return nil }
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string { return nil }
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return nil }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error { return nil }
|
||||
func (w *other) Close() error { return nil }
|
||||
func (w *other) WatchList() []string { return nil }
|
||||
func (w *other) Add(name string) error { return nil }
|
||||
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
|
||||
func (w *other) Remove(name string) error { return nil }
|
||||
func (w *other) xSupports(op Op) bool { return false }
|
||||
|
||||
321
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
321
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
@@ -1,12 +1,8 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
// Windows backend based on ReadDirectoryChangesW()
|
||||
//
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
//
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -19,196 +15,80 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
type readDirChangesW struct {
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
done chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(50)
|
||||
}
|
||||
var defaultBufferSize = 50
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &Watcher{
|
||||
w := &readDirChangesW{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
done: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
func (w *readDirChangesW) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
event.renamedFrom = renamedFrom
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case ch := <-w.done:
|
||||
w.done <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
func (w *readDirChangesW) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.quit:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
func (w *readDirChangesW) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
@@ -217,66 +97,30 @@ func (w *Watcher) Close() error {
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
// Send "done" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
w.done <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
@@ -295,18 +139,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
func (w *readDirChangesW) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
@@ -320,11 +160,7 @@ func (w *Watcher) Remove(name string) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
func (w *readDirChangesW) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
@@ -335,7 +171,13 @@ func (w *Watcher) WatchList() []string {
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
entries = append(entries, watchEntry.path)
|
||||
for name := range watchEntry.names {
|
||||
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||
}
|
||||
// the directory itself is being watched
|
||||
if watchEntry.mask != 0 {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -361,7 +203,7 @@ const (
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
@@ -417,7 +259,7 @@ type (
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
func (w *readDirChangesW) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
@@ -425,7 +267,7 @@ func (w *Watcher) wakeupReader() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
||||
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
@@ -439,7 +281,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (w *Watcher) getIno(path string) (ino *inode, err error) {
|
||||
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
@@ -482,9 +324,8 @@ func (m watchMap) set(ino *inode, watch *watch) {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
//pathname, recurse := recursivePath(pathname)
|
||||
recurse := false
|
||||
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
@@ -538,7 +379,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
@@ -566,11 +407,11 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
@@ -578,23 +419,23 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
func (w *readDirChangesW) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
func (w *readDirChangesW) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
@@ -624,7 +465,7 @@ func (w *Watcher) startRead(watch *watch) error {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
@@ -637,7 +478,7 @@ func (w *Watcher) startRead(watch *watch) error {
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
func (w *readDirChangesW) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
@@ -652,7 +493,7 @@ func (w *Watcher) readEvents() {
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
case ch := <-w.done:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
@@ -700,7 +541,7 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
@@ -733,6 +574,10 @@ func (w *Watcher) readEvents() {
|
||||
name := windows.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
if debug {
|
||||
internal.Debug(fullname, raw.Action)
|
||||
}
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
@@ -761,21 +606,22 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
w.sendEvent(fullname, watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
w.sendEvent(fullname, "", watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
} else {
|
||||
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
}
|
||||
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
@@ -787,8 +633,7 @@ func (w *Watcher) readEvents() {
|
||||
// Error!
|
||||
if offset >= n {
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New(
|
||||
"Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -799,7 +644,7 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
||||
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
@@ -810,7 +655,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
||||
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
@@ -825,3 +670,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
vendor/github.com/fsnotify/fsnotify/fsnotify.go (generated, vendored): 370 lines changed
@@ -3,19 +3,146 @@
|
||||
//
|
||||
// Currently supported systems:
|
||||
//
|
||||
// Linux 2.6.32+ via inotify
|
||||
// BSD, macOS via kqueue
|
||||
// Windows via ReadDirectoryChangesW
|
||||
// illumos via FEN
|
||||
// - Linux via inotify
|
||||
// - BSD, macOS via kqueue
|
||||
// - Windows via ReadDirectoryChangesW
|
||||
// - illumos via FEN
|
||||
//
|
||||
// # FSNOTIFY_DEBUG
|
||||
//
|
||||
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
|
||||
// stderr. This can be useful to track down some problems, especially in cases
|
||||
// where fsnotify is used as an indirect dependency.
|
||||
//
|
||||
// Every event will be printed as soon as there's something useful to print,
|
||||
// with as little processing from fsnotify.
|
||||
//
|
||||
// Example output:
|
||||
//
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
|
||||
package fsnotify
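A small sketch of enabling the FSNOTIFY_DEBUG output described above. The variable is read once at package init, so it has to be set in the environment before the watching process starts; the helper binary name and path below are hypothetical:

package main

import (
    "log"
    "os"
    "os/exec"
)

func main() {
    // Run a (hypothetical) watcher binary with fsnotify debug logging on.
    cmd := exec.Command("./mywatcher", "/tmp/dir")
    cmd.Env = append(os.Environ(), "FSNOTIFY_DEBUG=1")
    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
}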
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
b backend
|
||||
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
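Following up on the inotify limits described in the Watcher documentation above: reaching the watch limit surfaces as a "no space left on device" error. A hedged sketch of making that case easier to diagnose, assuming the underlying ENOSPC errno is propagated in a form errors.Is can unwrap (the exact wrapping may differ between versions):

import (
    "errors"
    "log"
    "syscall"

    "github.com/fsnotify/fsnotify"
)

// addOrExplain adds a watch and prints a hint when the inotify watch limit
// is hit on Linux.
func addOrExplain(w *fsnotify.Watcher, dir string) error {
    err := w.Add(dir)
    if errors.Is(err, syscall.ENOSPC) {
        log.Printf("watch limit reached for %q; consider raising fs.inotify.max_user_watches", dir)
    }
    return err
}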
|
||||
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
// Path to the file or directory.
|
||||
@@ -30,6 +157,16 @@ type Event struct {
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
|
||||
// Create events will have this set to the old path if it's a rename. This
|
||||
// only works when both the source and destination are watched. It's not
|
||||
// reliable when watching individual files, only directories.
|
||||
//
|
||||
// For example "mv /tmp/file /tmp/rename" will emit:
|
||||
//
|
||||
// Event{Op: Rename, Name: "/tmp/file"}
|
||||
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
|
||||
renamedFrom string
|
||||
}
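As the Op documentation above recommends, the bitmask should be checked with Has() rather than compared with ==, since several operations can be set in one event. A minimal sketch (the reactions in the comments are placeholders):

import "github.com/fsnotify/fsnotify"

// handle reacts to a single event using Has() on the Op bitmask.
func handle(ev fsnotify.Event) {
    if ev.Has(fsnotify.Write) {
        // a write happened, even if e.g. Chmod is also set in the same event
    }
    if ev.Has(fsnotify.Remove) || ev.Has(fsnotify.Rename) {
        // the watched path is gone; re-add the watch if it reappears
    }
}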
|
||||
|
||||
// Op describes a set of file operations.
|
||||
@@ -50,7 +187,7 @@ const (
|
||||
// example "remove to trash" is often a rename).
|
||||
Remove
|
||||
|
||||
// The path was renamed to something else; any watched on it will be
|
||||
// The path was renamed to something else; any watches on it will be
|
||||
// removed.
|
||||
Rename
|
||||
|
||||
@@ -60,15 +197,157 @@ const (
|
||||
// get triggered very frequently by some software. For example, Spotlight
|
||||
// indexing on macOS, anti-virus software, backup software, etc.
|
||||
Chmod
|
||||
|
||||
// File descriptor was opened.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableOpen
|
||||
|
||||
// File was read from.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableRead
|
||||
|
||||
// File opened for writing was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
//
|
||||
// The advantage of using this over Write is that it's more reliable than
|
||||
// waiting for Write events to stop. It's also faster (if you're not
|
||||
// listening to Write events): copying a file of a few GB can easily
|
||||
// generate tens of thousands of Write events in a short span of time.
|
||||
xUnportableCloseWrite
|
||||
|
||||
// File opened for reading was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableCloseRead
|
||||
)
|
||||
|
||||
// Common errors that can be reported.
|
||||
var (
|
||||
// ErrNonExistentWatch is used when Remove() is called on a path that's not
|
||||
// added.
|
||||
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||
|
||||
// ErrClosed is used when trying to operate on a closed Watcher.
|
||||
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||
|
||||
// ErrEventOverflow is reported from the Errors channel when there are too
|
||||
// many events:
|
||||
//
|
||||
// - inotify: inotify returns IN_Q_OVERFLOW – because there are too
|
||||
// many queued events (the fs.inotify.max_queued_events
|
||||
// sysctl can be used to increase this).
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||
|
||||
// ErrUnsupported is returned by AddWith() when WithOps() specified an
|
||||
// Unportable event that's not supported on this platform.
|
||||
//lint:ignore ST1012 not relevant
|
||||
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
|
||||
)
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
ev, errs := make(chan Event, sz), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
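A usage sketch for NewBufferedWatcher; the 4096 value is an arbitrary example, and as noted above, increasing kernel buffers is usually preferable to a large userspace buffer:

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    // Buffer up to 4096 events in the Events channel.
    w, err := fsnotify.NewBufferedWatcher(4096)
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()
}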
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error { return w.b.Close() }
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// The order is undefined, and may differ per call. Returns nil if
|
||||
// [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
|
||||
|
||||
// Supports reports if all the listed operations are supported by this platform.
|
||||
//
|
||||
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
|
||||
// return false for an Op starting with Unportable.
|
||||
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
|
||||
|
||||
func (o Op) String() string {
|
||||
var b strings.Builder
|
||||
if o.Has(Create) {
|
||||
@@ -80,6 +359,18 @@ func (o Op) String() string {
|
||||
if o.Has(Write) {
|
||||
b.WriteString("|WRITE")
|
||||
}
|
||||
if o.Has(xUnportableOpen) {
|
||||
b.WriteString("|OPEN")
|
||||
}
|
||||
if o.Has(xUnportableRead) {
|
||||
b.WriteString("|READ")
|
||||
}
|
||||
if o.Has(xUnportableCloseWrite) {
|
||||
b.WriteString("|CLOSE_WRITE")
|
||||
}
|
||||
if o.Has(xUnportableCloseRead) {
|
||||
b.WriteString("|CLOSE_READ")
|
||||
}
|
||||
if o.Has(Rename) {
|
||||
b.WriteString("|RENAME")
|
||||
}
|
||||
@@ -100,24 +391,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||
|
||||
// String returns a string representation of the event with its path.
|
||||
func (e Event) String() string {
|
||||
if e.renamedFrom != "" {
|
||||
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
|
||||
}
|
||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||
}
|
||||
|
||||
type (
|
||||
backend interface {
|
||||
Add(string) error
|
||||
AddWith(string, ...addOpt) error
|
||||
Remove(string) error
|
||||
WatchList() []string
|
||||
Close() error
|
||||
xSupports(Op) bool
|
||||
}
|
||||
addOpt func(opt *withOpts)
|
||||
withOpts struct {
|
||||
bufsize int
|
||||
bufsize int
|
||||
op Op
|
||||
noFollow bool
|
||||
sendCreate bool
|
||||
}
|
||||
)
|
||||
|
||||
var debug = func() bool {
|
||||
// Check for exactly "1" (rather than mere existence) so we can add
|
||||
// options/flags in the future. I don't know if we ever want that, but it's
|
||||
// nice to leave the option open.
|
||||
return os.Getenv("FSNOTIFY_DEBUG") == "1"
|
||||
}()
|
||||
|
||||
var defaultOpts = withOpts{
|
||||
bufsize: 65536, // 64K
|
||||
op: Create | Write | Remove | Rename | Chmod,
|
||||
}
|
||||
|
||||
func getOptions(opts ...addOpt) withOpts {
|
||||
with := defaultOpts
|
||||
for _, o := range opts {
|
||||
o(&with)
|
||||
if o != nil {
|
||||
o(&with)
|
||||
}
|
||||
}
|
||||
return with
|
||||
}
|
||||
@@ -136,9 +451,44 @@ func WithBufferSize(bytes int) addOpt {
|
||||
return func(opt *withOpts) { opt.bufsize = bytes }
|
||||
}
|
||||
|
||||
// WithOps sets which operations to listen for. The default is [Create],
|
||||
// [Write], [Remove], [Rename], and [Chmod].
|
||||
//
|
||||
// Excluding operations you're not interested in can save quite a bit of CPU
|
||||
// time; in some use cases there may be hundreds of thousands of useless Write
|
||||
// or Chmod operations per second.
|
||||
//
|
||||
// This can also be used to add unportable operations not supported by all
|
||||
// platforms; unportable operations all start with "Unportable":
|
||||
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
|
||||
// [UnportableCloseRead].
|
||||
//
|
||||
// AddWith returns an error when using an unportable operation that's not
|
||||
// supported. Use [Watcher.Support] to check for support.
|
||||
func withOps(op Op) addOpt {
|
||||
return func(opt *withOpts) { opt.op = op }
|
||||
}
|
||||
|
||||
// WithNoFollow disables following symlinks, so the symlinks themselves are
|
||||
// watched.
|
||||
func withNoFollow() addOpt {
|
||||
return func(opt *withOpts) { opt.noFollow = true }
|
||||
}
|
||||
|
||||
// "Internal" option for recursive watches on inotify.
|
||||
func withCreate() addOpt {
|
||||
return func(opt *withOpts) { opt.sendCreate = true }
|
||||
}
|
||||
|
||||
var enableRecurse = false
|
||||
|
||||
// Check if this path is recursive (ends with "/..." or "\..."), and return the
|
||||
// path with the /... stripped.
|
||||
func recursivePath(path string) (string, bool) {
|
||||
path = filepath.Clean(path)
|
||||
if !enableRecurse { // Only enabled in tests for now.
|
||||
return path, false
|
||||
}
|
||||
if filepath.Base(path) == "..." {
|
||||
return filepath.Dir(path), true
|
||||
}
|
||||
|
||||
vendor/github.com/fsnotify/fsnotify/internal/darwin.go (generated, vendored, new file): 39 lines
@@ -0,0 +1,39 @@
|
||||
//go:build darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSyscallEACCES = syscall.EACCES
|
||||
ErrUnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = l.Cur
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go (generated, vendored, new file): 57 lines
@@ -0,0 +1,57 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
|
||||
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
|
||||
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
|
||||
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
|
||||
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
|
||||
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
|
||||
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
|
||||
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
|
||||
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
|
||||
{"NOTE_NONE", unix.NOTE_NONE},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_REAP", unix.NOTE_REAP},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
|
||||
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
|
||||
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
|
||||
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go (generated, vendored, new file): 33 lines
@@ -0,0 +1,33 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go (generated, vendored, new file): 42 lines
@@ -0,0 +1,42 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CLOSE", unix.NOTE_CLOSE},
|
||||
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OPEN", unix.NOTE_OPEN},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_READ", unix.NOTE_READ},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go (generated, vendored, new file): 32 lines
@@ -0,0 +1,32 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, kevent *unix.Kevent_t) {
|
||||
mask := uint32(kevent.Fflags)
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go (generated, vendored, new file): 56 lines
@@ -0,0 +1,56 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask, cookie uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"IN_ACCESS", unix.IN_ACCESS},
|
||||
{"IN_ATTRIB", unix.IN_ATTRIB},
|
||||
{"IN_CLOSE", unix.IN_CLOSE},
|
||||
{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
|
||||
{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
|
||||
{"IN_CREATE", unix.IN_CREATE},
|
||||
{"IN_DELETE", unix.IN_DELETE},
|
||||
{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
|
||||
{"IN_IGNORED", unix.IN_IGNORED},
|
||||
{"IN_ISDIR", unix.IN_ISDIR},
|
||||
{"IN_MODIFY", unix.IN_MODIFY},
|
||||
{"IN_MOVE", unix.IN_MOVE},
|
||||
{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
|
||||
{"IN_MOVED_TO", unix.IN_MOVED_TO},
|
||||
{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
|
||||
{"IN_OPEN", unix.IN_OPEN},
|
||||
{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
|
||||
{"IN_UNMOUNT", unix.IN_UNMOUNT},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
var c string
|
||||
if cookie > 0 {
|
||||
c = fmt.Sprintf("(cookie: %d) ", cookie)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go (generated, vendored, new file): 25 lines
@@ -0,0 +1,25 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go (generated, vendored, new file): 28 lines
@@ -0,0 +1,28 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EOF", unix.NOTE_EOF},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go (generated, vendored, new file): 45 lines
@@ -0,0 +1,45 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask int32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m int32
|
||||
}{
|
||||
{"FILE_ACCESS", unix.FILE_ACCESS},
|
||||
{"FILE_MODIFIED", unix.FILE_MODIFIED},
|
||||
{"FILE_ATTRIB", unix.FILE_ATTRIB},
|
||||
{"FILE_TRUNC", unix.FILE_TRUNC},
|
||||
{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
|
||||
{"FILE_DELETE", unix.FILE_DELETE},
|
||||
{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
|
||||
{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
|
||||
{"UNMOUNTED", unix.UNMOUNTED},
|
||||
{"MOUNTEDOVER", unix.MOUNTEDOVER},
|
||||
{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go (generated, vendored, new file): 40 lines
@@ -0,0 +1,40 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func Debug(name string, mask uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
|
||||
{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
|
||||
{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
|
||||
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
|
||||
{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/freebsd.go (generated, vendored, new file): 31 lines
@@ -0,0 +1,31 @@
|
||||
//go:build freebsd
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSyscallEACCES = syscall.EACCES
|
||||
ErrUnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = uint64(l.Cur)
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
|
||||
vendor/github.com/fsnotify/fsnotify/internal/internal.go (generated, vendored, new file): 2 lines
@@ -0,0 +1,2 @@
|
||||
// Package internal contains some helpers.
|
||||
package internal
|
||||
vendor/github.com/fsnotify/fsnotify/internal/unix.go (generated, vendored, new file): 31 lines
@@ -0,0 +1,31 @@
//go:build !windows && !darwin && !freebsd && !plan9

package internal

import (
	"syscall"

	"golang.org/x/sys/unix"
)

var (
	ErrSyscallEACCES = syscall.EACCES
	ErrUnixEACCES    = unix.EACCES
)

var maxfiles uint64

func SetRlimit() {
	// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max
		syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	maxfiles = uint64(l.Cur)
}

func Maxfiles() uint64                              { return maxfiles }
func Mkfifo(path string, mode uint32) error         { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
7
vendor/github.com/fsnotify/fsnotify/internal/unix2.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
//go:build !windows

package internal

func HasPrivilegesForSymlink() bool {
	return true
}
41
vendor/github.com/fsnotify/fsnotify/internal/windows.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
//go:build windows

package internal

import (
	"errors"

	"golang.org/x/sys/windows"
)

// Just a dummy.
var (
	ErrSyscallEACCES = errors.New("dummy")
	ErrUnixEACCES    = errors.New("dummy")
)

func SetRlimit()                                    {}
func Maxfiles() uint64                              { return 1<<64 - 1 }
func Mkfifo(path string, mode uint32) error         { return errors.New("no FIFOs on Windows") }
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }

func HasPrivilegesForSymlink() bool {
	var sid *windows.SID
	err := windows.AllocateAndInitializeSid(
		&windows.SECURITY_NT_AUTHORITY,
		2,
		windows.SECURITY_BUILTIN_DOMAIN_RID,
		windows.DOMAIN_ALIAS_RID_ADMINS,
		0, 0, 0, 0, 0, 0,
		&sid)
	if err != nil {
		return false
	}
	defer windows.FreeSid(sid)
	token := windows.Token(0)
	member, err := token.IsMember(sid)
	if err != nil {
		return false
	}
	return member || token.IsElevated()
}
259
vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
generated
vendored
@@ -1,259 +0,0 @@
|
||||
#!/usr/bin/env zsh
|
||||
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
||||
setopt err_exit no_unset pipefail extended_glob
|
||||
|
||||
# Simple script to update the godoc comments on all watchers so you don't need
|
||||
# to update the same comment 5 times.
|
||||
|
||||
watcher=$(<<EOF
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all times, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
EOF
|
||||
)
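The godoc text above names fs.inotify.max_user_watches and fs.inotify.max_user_instances as the limits a Linux watcher can run into, and points at their /proc files. A minimal, illustrative Go sketch (not part of this change) that reads those limits before creating many watches could look like:

```go
// Illustrative only: print the inotify limits described above (Linux only).
package main

import (
	"fmt"
	"os"
	"strings"
)

func readLimit(path string) string {
	b, err := os.ReadFile(path)
	if err != nil {
		return "unknown (" + err.Error() + ")"
	}
	return strings.TrimSpace(string(b))
}

func main() {
	fmt.Println("max_user_watches:  ", readLimit("/proc/sys/fs/inotify/max_user_watches"))
	fmt.Println("max_user_instances:", readLimit("/proc/sys/fs/inotify/max_user_instances"))
}
```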
|
||||
|
||||
new=$(<<EOF
|
||||
// NewWatcher creates a new Watcher.
|
||||
EOF
|
||||
)
|
||||
|
||||
newbuffered=$(<<EOF
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
EOF
|
||||
)
|
||||
|
||||
add=$(<<EOF
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to to destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
EOF
|
||||
)
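The Add documentation being deleted here recommends watching the parent directory rather than a single file, because editors replace files atomically and the watch on the original file is lost. A small sketch of that pattern against the public fsnotify API (the target path is hypothetical) might look like:

```go
// Illustrative only: watch the parent directory and filter events by name.
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	target := "/tmp/dir/file.txt" // hypothetical file to follow

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file, so atomic saves are still observed.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if filepath.Clean(ev.Name) == filepath.Clean(target) {
				log.Printf("%s: %s", ev.Op, ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```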
|
||||
|
||||
addwith=$(<<EOF
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
EOF
|
||||
)
|
||||
|
||||
remove=$(<<EOF
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
EOF
|
||||
)
|
||||
|
||||
close=$(<<EOF
|
||||
// Close removes all watches and closes the Events channel.
|
||||
EOF
|
||||
)
|
||||
|
||||
watchlist=$(<<EOF
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
EOF
|
||||
)
|
||||
|
||||
events=$(<<EOF
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
EOF
|
||||
)
|
||||
|
||||
errors=$(<<EOF
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
EOF
|
||||
)
|
||||
|
||||
set-cmt() {
|
||||
local pat=$1
|
||||
local cmt=$2
|
||||
|
||||
IFS=$'\n' local files=($(grep -n $pat backend_*~*_test.go))
|
||||
for f in $files; do
|
||||
IFS=':' local fields=($=f)
|
||||
local file=$fields[1]
|
||||
local end=$(( $fields[2] - 1 ))
|
||||
|
||||
# Find start of comment.
|
||||
local start=0
|
||||
IFS=$'\n' local lines=($(head -n$end $file))
|
||||
for (( i = 1; i <= $#lines; i++ )); do
|
||||
local line=$lines[-$i]
|
||||
if ! grep -q '^[[:space:]]*//' <<<$line; then
|
||||
start=$(( end - (i - 2) ))
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
head -n $(( start - 1 )) $file >/tmp/x
|
||||
print -r -- $cmt >>/tmp/x
|
||||
tail -n+$(( end + 1 )) $file >>/tmp/x
|
||||
mv /tmp/x $file
|
||||
done
|
||||
}
|
||||
|
||||
set-cmt '^type Watcher struct ' $watcher
|
||||
set-cmt '^func NewWatcher(' $new
|
||||
set-cmt '^func NewBufferedWatcher(' $newbuffered
|
||||
set-cmt '^func (w \*Watcher) Add(' $add
|
||||
set-cmt '^func (w \*Watcher) AddWith(' $addwith
|
||||
set-cmt '^func (w \*Watcher) Remove(' $remove
|
||||
set-cmt '^func (w \*Watcher) Close(' $close
|
||||
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
|
||||
set-cmt '^[[:space:]]*Events *chan Event$' $events
|
||||
set-cmt '^[[:space:]]*Errors *chan error$' $errors
|
||||
64
vendor/github.com/fsnotify/fsnotify/shared.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
package fsnotify

import "sync"

type shared struct {
	Events chan Event
	Errors chan error
	done   chan struct{}
	mu     sync.Mutex
}

func newShared(ev chan Event, errs chan error) *shared {
	return &shared{
		Events: ev,
		Errors: errs,
		done:   make(chan struct{}),
	}
}

// Returns true if the event was sent, or false if watcher is closed.
func (w *shared) sendEvent(e Event) bool {
	if e.Op == 0 {
		return true
	}
	select {
	case <-w.done:
		return false
	case w.Events <- e:
		return true
	}
}

// Returns true if the error was sent, or false if watcher is closed.
func (w *shared) sendError(err error) bool {
	if err == nil {
		return true
	}
	select {
	case <-w.done:
		return false
	case w.Errors <- err:
		return true
	}
}

func (w *shared) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

// Mark as closed; returns true if it was already closed.
func (w *shared) close() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.isClosed() {
		return true
	}
	close(w.done)
	return false
}
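The new shared.go above centralizes how a backend delivers events without blocking once Close has been called: every send races against a done channel that is closed exactly once under a mutex. A minimal standalone sketch of the same pattern, with hypothetical names that are not part of fsnotify:

```go
// Illustrative only: the "send until closed" pattern used by shared.go.
package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	events chan string
	done   chan struct{}
	mu     sync.Mutex
}

func newNotifier() *notifier {
	return &notifier{events: make(chan string), done: make(chan struct{})}
}

// send returns false instead of blocking forever once the notifier is closed.
func (n *notifier) send(ev string) bool {
	select {
	case <-n.done:
		return false
	case n.events <- ev:
		return true
	}
}

// close is safe to call more than once.
func (n *notifier) close() {
	n.mu.Lock()
	defer n.mu.Unlock()
	select {
	case <-n.done: // already closed
	default:
		close(n.done)
	}
}

func main() {
	n := newNotifier()
	go func() { n.send("hello") }()
	fmt.Println(<-n.events)
	n.close()
	n.close() // idempotent
	fmt.Println("sent after close:", n.send("dropped")) // false
}
```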
3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf
generated
vendored
Normal file
@@ -0,0 +1,3 @@
checks = ['all',
	'-U1000', # Don't complain about unused functions.
]
1
vendor/github.com/fsnotify/fsnotify/system_bsd.go
generated
vendored
@@ -1,5 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly

package fsnotify
1
vendor/github.com/fsnotify/fsnotify/system_darwin.go
generated
vendored
@@ -1,5 +1,4 @@
//go:build darwin
// +build darwin

package fsnotify
591
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
@@ -1,30 +1,31 @@
|
||||
# CBOR Codec in Go
|
||||
|
||||
<!-- [](#cbor-library-in-go) -->
|
||||
<h1>CBOR Codec <a href="https://pkg.go.dev/github.com/fxamacker/cbor/v2"><img src="https://raw.githubusercontent.com/fxamacker/images/refs/heads/master/cbor/go-logo-blue.svg" alt="Go logo" style="height: 1em;" align="right"></a></h1>
|
||||
|
||||
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||
|
||||
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer.
|
||||
|
||||
## fxamacker/cbor
|
||||
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||
[](#fuzzing-and-code-coverage)
|
||||
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||
[](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
|
||||
|
||||
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||
|
||||
<details><summary>Highlights</summary><p/>
|
||||
<details><summary> 🔎 Highlights</summary><p/>
|
||||
|
||||
__🚀 Speed__
|
||||
|
||||
@@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili
|
||||
|
||||
__🗜️ Data Size__
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
|
||||
__:jigsaw: Usability__
|
||||
|
||||
@@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
|
||||
|
||||
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||
|
||||
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
|
||||
|
||||
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||
> [!NOTE]
|
||||
> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
|
||||
>
|
||||
> | Codec | Speed (ns/op) | Memory | Allocs |
|
||||
> | :---- | ------------: | -----: | -----: |
|
||||
> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
|
||||
> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
|
||||
>
|
||||
> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
|
||||
>
|
||||
> <details><summary> 🔎 Benchmark details </summary><p/>
|
||||
>
|
||||
> Latest comparison for decoding CBOR data to Go `[]byte`:
|
||||
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
|
||||
> - go test -bench=. -benchmem -count=20
|
||||
>
|
||||
> #### Prior comparisons
|
||||
>
|
||||
> | Codec | Speed (ns/op) | Memory | Allocs |
|
||||
> | :---- | ------------: | -----: | -----: |
|
||||
> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
>
|
||||
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
> - go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
> - go test -bench=. -benchmem -count=20
|
||||
>
|
||||
> </details>
|
||||
|
||||
```Go
|
||||
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
// while decoding 181 bytes.
|
||||
package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
In contrast, some codecs can crash or use excessive resources while decoding bad data.
|
||||
|
||||
// Example data is from https://github.com/golang/go/issues/24446
|
||||
// (shortened to 181 bytes).
|
||||
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
"303030303030303030303030303030303030303030303030303030303030" +
|
||||
"30"
|
||||
> [!WARNING]
|
||||
> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
>
|
||||
> <details><summary> 🔎 gob fatal error (out of memory) 💥 decoding 181 bytes</summary><p/>
|
||||
>
|
||||
> ```Go
|
||||
> // Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
|
||||
> package main
|
||||
> import (
|
||||
> "bytes"
|
||||
> "encoding/gob"
|
||||
> "encoding/hex"
|
||||
> "fmt"
|
||||
> )
|
||||
>
|
||||
> // Example data is from https://github.com/golang/go/issues/24446
|
||||
> // (shortened to 181 bytes).
|
||||
> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
> "303030303030303030303030303030303030303030303030303030303030" +
|
||||
> "30"
|
||||
>
|
||||
> type X struct {
|
||||
> J *X
|
||||
> K map[string]int
|
||||
> }
|
||||
>
|
||||
> func main() {
|
||||
> raw, _ := hex.DecodeString(data)
|
||||
> decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
>
|
||||
> var x X
|
||||
> decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
> fmt.Println("Decoding finished.")
|
||||
> }
|
||||
> ```
|
||||
>
|
||||
>
|
||||
> </details>
|
||||
|
||||
type X struct {
|
||||
J *X
|
||||
K map[string]int
|
||||
}
|
||||
### Smaller Encodings with Struct Tag Options
|
||||
|
||||
func main() {
|
||||
raw, _ := hex.DecodeString(data)
|
||||
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
Struct tags automatically reduce encoded size of structs and improve speed.
|
||||
|
||||
var x X
|
||||
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
fmt.Println("Decoding finished.")
|
||||
}
|
||||
```
|
||||
We can write less code by using struct tag options:
|
||||
- `toarray`: encode without field names (decode back to original struct)
|
||||
- `keyasint`: encode field names as integers (decode back to original struct)
|
||||
- `omitempty`: omit empty field when encoding
|
||||
- `omitzero`: omit zero-value field when encoding
|
||||
|
||||
<hr/>
|
||||
As a special case, struct field tag "-" omits the field.
|
||||
|
||||
</details>
|
||||
|
||||
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||
|
||||
<details><summary>Benchmark details</summary><p/>
|
||||
|
||||
Latest comparison used:
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
#### Prior comparisons
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Smaller Encodings with Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
Example using different struct tags together:
|
||||
NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field.
|
||||
|
||||

|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||
> [!NOTE]
|
||||
> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
|
||||
> - `encoding/json`: 18 bytes of JSON
|
||||
> - `fxamacker/cbor`: 1 byte of CBOR
|
||||
>
|
||||
> <details><summary> 🔎 Encoding 3-level nested Go struct with omitempty</summary><p/>
|
||||
>
|
||||
> https://go.dev/play/p/YxwvfPdFQG2
|
||||
>
|
||||
> ```Go
|
||||
> // Example encoding nested struct (with omitempty tag)
|
||||
> // - encoding/json: 18 byte JSON
|
||||
> // - fxamacker/cbor: 1 byte CBOR
|
||||
>
|
||||
> package main
|
||||
>
|
||||
> import (
|
||||
> "encoding/hex"
|
||||
> "encoding/json"
|
||||
> "fmt"
|
||||
>
|
||||
> "github.com/fxamacker/cbor/v2"
|
||||
> )
|
||||
>
|
||||
> type GrandChild struct {
|
||||
> Quux int `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> type Child struct {
|
||||
> Baz int `json:",omitempty"`
|
||||
> Qux GrandChild `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> type Parent struct {
|
||||
> Foo Child `json:",omitempty"`
|
||||
> Bar int `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> func cb() {
|
||||
> results, _ := cbor.Marshal(Parent{})
|
||||
> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
>
|
||||
> text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
> fmt.Println("DN: " + text)
|
||||
> }
|
||||
>
|
||||
> func js() {
|
||||
> results, _ := json.Marshal(Parent{})
|
||||
> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
>
|
||||
> text := string(results) // JSON
|
||||
> fmt.Println("JSON: " + text)
|
||||
> }
|
||||
>
|
||||
> func main() {
|
||||
> cb()
|
||||
> fmt.Println("-------------")
|
||||
> js()
|
||||
> }
|
||||
> ```
|
||||
>
|
||||
> Output (DN is Diagnostic Notation):
|
||||
> ```
|
||||
> hex(CBOR): a0
|
||||
> DN: {}
|
||||
> -------------
|
||||
> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
> JSON: {"Foo":{"Qux":{}}}
|
||||
> ```
|
||||
>
|
||||
> </details>
|
||||
|
||||
|
||||
## Quick Start
|
||||
|
||||
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||
|
||||
> [!TIP]
|
||||
>
|
||||
> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
|
||||
>
|
||||
> <details><summary> 🔎 More about tinygo feature branch</summary>
|
||||
>
|
||||
> ### Tinygo
|
||||
>
|
||||
> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
|
||||
>
|
||||
> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
|
||||
>
|
||||
> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
|
||||
>
|
||||
> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
|
||||
> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
|
||||
> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
|
||||
> - encoding error message can be different when encoding function type.
|
||||
>
|
||||
> Related tinygo issues:
|
||||
> - https://github.com/tinygo-org/tinygo/issues/4277
|
||||
> - https://github.com/tinygo-org/tinygo/issues/4458
|
||||
>
|
||||
> </details>
|
||||
|
||||
|
||||
### Key Points
|
||||
|
||||
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||
@@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||
|
||||
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||
// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
|
||||
// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
|
||||
```
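
Since `UnmarshalFirst` returns the remaining bytes, iterating over a CBOR Sequence is a short loop. A minimal sketch (not taken from the upstream README), using two back-to-back CBOR data items:

```go
// Sketch: decode every item in a CBOR Sequence (RFC 8742) with UnmarshalFirst.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Two CBOR data items back to back: the unsigned integers 1 and 2.
	seq := []byte{0x01, 0x02}

	for len(seq) > 0 {
		var v any
		rest, err := cbor.UnmarshalFirst(seq, &v)
		if err != nil {
			panic(err)
		}
		fmt.Println(v)
		seq = rest // continue with the bytes after the first data item
	}
}
```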
|
||||
|
||||
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
|
||||
- Different CBOR libraries may use different default settings.
|
||||
- CBOR-based formats or protocols usually require specific settings.
|
||||
|
||||
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
> [!IMPORTANT]
|
||||
> CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
>
|
||||
> - Different CBOR libraries may use different default settings.
|
||||
> - CBOR-based formats or protocols usually require specific settings.
|
||||
>
|
||||
> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
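
For instance, the CTAP2 preset mentioned in the note above can be turned into an encoding mode in two lines. A minimal sketch, assuming only the public cbor v2 API:

```go
// Sketch: use the "CTAP2 Canonical CBOR" preset instead of hand-tuning options.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Preset EncOptions -> immutable, concurrency-safe EncMode.
	em, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		panic(err)
	}

	b, err := em.Marshal(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", b) // canonical encoding: map keys are sorted deterministically
}
```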
|
||||
### Presets
|
||||
|
||||
@@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
||||
|
||||
### Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
As a special case, struct field tag "-" omits the field.
|
||||
|
||||
<details><summary> 🔎 Example encoding with struct field tag "-"</summary><p/>
|
||||
|
||||
https://go.dev/play/p/aWEIFxd7InX
|
||||
|
||||
```Go
|
||||
// https://github.com/fxamacker/cbor/issues/652
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
|
||||
type Entity struct {
|
||||
_ struct{} `cbor:",toarray"`
|
||||
ID uint64 `json:"id"`
|
||||
Type string `cbor:"-" json:"typeOf"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
entity := Entity{
|
||||
ID: 1,
|
||||
Type: "int64",
|
||||
Name: "Identifier",
|
||||
}
|
||||
|
||||
c, _ := cbor.Marshal(entity)
|
||||
diag, _ := cbor.Diagnose(c)
|
||||
fmt.Printf("CBOR in hex: %x\n", c)
|
||||
fmt.Printf("CBOR in edn: %s\n", diag)
|
||||
|
||||
j, _ := json.Marshal(entity)
|
||||
fmt.Printf("JSON: %s\n", string(j))
|
||||
|
||||
fmt.Printf("JSON encoding is %d bytes\n", len(j))
|
||||
fmt.Printf("CBOR encoding is %d bytes\n", len(c))
|
||||
|
||||
// Output:
|
||||
// CBOR in hex: 82016a4964656e746966696572
|
||||
// CBOR in edn: [1, "Identifier"]
|
||||
// JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
|
||||
// JSON encoding is 45 bytes
|
||||
// CBOR encoding is 13 bytes
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary> 🔎 Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
@@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>Example using several struct tags</summary><p/>
|
||||
<details><summary> 🔎 Example using struct tag options</summary><p/>
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
|
||||
### CBOR Tags
|
||||
|
||||
@@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
||||
|
||||
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||
|
||||
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||
<details><summary> 🔎 Example using TagSet and TagOptions</summary><p/>
|
||||
|
||||
```go
|
||||
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||
@@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil {
|
||||
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||
|
||||
// Marshal signedCWT with tag number.
|
||||
if data, err := cbor.Marshal(v); err != nil {
|
||||
if data, err := em.Marshal(v); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
|
||||
|
||||
Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
|
||||
|
||||
The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
|
||||
|
||||
<details><summary> 🔎 Example using Embedded JSON Tag for CBOR (tag 262)</summary>
|
||||
|
||||
```go
|
||||
// https://github.com/fxamacker/cbor/issues/657
|
||||
|
||||
package cbor_test
|
||||
|
||||
// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
|
||||
// CBOR tag number 262 as "Embedded JSON Object" specified by the
|
||||
// document Embedded JSON Tag for CBOR:
|
||||
//
|
||||
// "Tag 262 can be applied to a byte string (major type 2) to indicate
|
||||
// that the byte string is a JSON Object. The length of the byte string
|
||||
// indicates the content."
|
||||
//
|
||||
// For more info, see Embedded JSON Tag for CBOR at:
|
||||
// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
|
||||
const cborTagNumForEmbeddedJSON = 262
|
||||
|
||||
// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
|
||||
// with tag number 262 and the tag content is a JSON object "embedded" as a
|
||||
// CBOR byte string (major type 2).
|
||||
type EmbeddedJSON struct {
|
||||
any
|
||||
}
|
||||
|
||||
func NewEmbeddedJSON(val any) EmbeddedJSON {
|
||||
return EmbeddedJSON{val}
|
||||
}
|
||||
|
||||
// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the
|
||||
// tag number 262 and the tag content is a JSON object that is
|
||||
// "embedded" as a CBOR byte string.
|
||||
func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
|
||||
// Encode v to JSON object.
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create cbor.Tag representing a tagged CBOR data item.
|
||||
tag := cbor.Tag{
|
||||
Number: cborTagNumForEmbeddedJSON,
|
||||
Content: data,
|
||||
}
|
||||
|
||||
// Marshal to a tagged CBOR data item.
|
||||
return cbor.Marshal(tag)
|
||||
}
|
||||
|
||||
// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
|
||||
// The byte slice provided to this function must contain a single
|
||||
// tagged CBOR data item with the tag number 262 and tag content
|
||||
// must be a JSON object "embedded" as a CBOR byte string.
|
||||
func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
|
||||
// Unmarshal tagged CBOR data item.
|
||||
var tag cbor.Tag
|
||||
if err := cbor.Unmarshal(b, &tag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check tag number.
|
||||
if tag.Number != cborTagNumForEmbeddedJSON {
|
||||
return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
|
||||
}
|
||||
|
||||
// Check tag content.
|
||||
jsonData, isByteString := tag.Content.([]byte)
|
||||
if !isByteString {
|
||||
return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
|
||||
}
|
||||
|
||||
// Unmarshal JSON object.
|
||||
return json.Unmarshal(jsonData, v)
|
||||
}
|
||||
|
||||
// MarshalJSON encodes EmbeddedJSON to a JSON object.
|
||||
func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.any)
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes a JSON object.
|
||||
func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
|
||||
dec := json.NewDecoder(bytes.NewReader(b))
|
||||
dec.UseNumber()
|
||||
return dec.Decode(&v.any)
|
||||
}
|
||||
|
||||
func Example_embeddedJSONTagForCBOR() {
|
||||
value := NewEmbeddedJSON(map[string]any{
|
||||
"name": "gopher",
|
||||
"id": json.Number("42"),
|
||||
})
|
||||
|
||||
data, err := cbor.Marshal(value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("cbor: %x\n", data)
|
||||
|
||||
var v EmbeddedJSON
|
||||
err = cbor.Unmarshal(data, &v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%+v\n", v.any)
|
||||
for k, v := range v.any.(map[string]any) {
|
||||
fmt.Printf(" %s: %v (%T)\n", k, v, v)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
### Functions and Interfaces
|
||||
|
||||
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||
<details><summary> 🔎 Functions and interfaces at a glance</summary><p/>
|
||||
|
||||
Common functions with same API as `encoding/json`:
|
||||
- `Marshal`, `Unmarshal`
|
||||
@@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
||||
Other useful functions:
|
||||
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
|
||||
- `Wellformed` returns true if the the CBOR data item is well-formed.
|
||||
- `Wellformed` returns true if the CBOR data item is well-formed.
|
||||
|
||||
Interfaces identical or comparable to Go `encoding` packages include:
|
||||
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||
@@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e.
|
||||
|
||||
## Status
|
||||
|
||||
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs.
|
||||
- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
|
||||
- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
|
||||
- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
|
||||
|
||||
v2.9.0 passed fuzz tests and is production quality.
|
||||
|
||||
The minimum version of Go required to build:
|
||||
- v2.8.0 and newer releases require go 1.20+.
|
||||
- v2.7.1 and older releases require go 1.17+.
|
||||
|
||||
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||
|
||||
### Prior Release
|
||||
### Prior Releases
|
||||
|
||||
[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
|
||||
|
||||
[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
|
||||
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||
|
||||
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||
|
||||
@@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
|
||||
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||
|
||||
<!--
|
||||
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
<details><summary> 🔎 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
|
||||
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||
|
||||
@@ -549,9 +792,9 @@ geomean 2.782
|
||||
|
||||
## Who uses fxamacker/cbor
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Confidential Computing Consortium, ConsenSys, EdgeX Foundry, F5, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes, Let's Encrypt (ISRG), Linaro, Linux Foundation, Matrix.org, Microsoft, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Red Hat OpenShift, Smallstep, Tailscale, Taurus SA, TIBCO, Veraison, and others.
|
||||
|
||||
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||
`fxamacker/cbor` passed multiple confidential security assessments in 2022. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) assessed a subset of fxamacker/cbor v2.4.
|
||||
|
||||
## Standards
|
||||
|
||||
@@ -588,7 +831,7 @@ By default, decoder treats time values of floating-point NaN and Infinity as if
|
||||
__Click to expand topic:__
|
||||
|
||||
<details>
|
||||
<summary>Duplicate Map Keys</summary><p>
|
||||
<summary> 🔎 Duplicate Map Keys</summary><p>
|
||||
|
||||
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||
|
||||
@@ -601,7 +844,7 @@ APF suffix means "Allow Partial Fill" so the destination map or struct can conta
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Tag Validity</summary><p>
|
||||
<summary> 🔎 Tag Validity</summary><p>
|
||||
|
||||
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||
|
||||
|
||||
27
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
@@ -38,11 +38,38 @@ func (bs ByteString) MarshalCBOR() ([]byte, error) {

// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
	if bs == nil {
		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
	}

	d := decoder{data: data, dm: defaultDecMode}

	// Check well-formedness of CBOR data item.
	// ByteString.UnmarshalCBOR() is exported, so
	// the codec needs to support same behavior for:
	//   - Unmarshal(data, *ByteString)
	//   - ByteString.UnmarshalCBOR(data)
	err := d.wellformed(false, false)
	if err != nil {
		return err
	}

	return bs.unmarshalCBOR(data)
}

// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
// This function assumes data is well-formed, and does not perform bounds checking.
// This function is called by Unmarshal().
func (bs *ByteString) unmarshalCBOR(data []byte) error {
	if bs == nil {
		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
	}

	// Decoding CBOR null and CBOR undefined to ByteString resets data.
	// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
25
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
@@ -17,6 +17,7 @@ import (
|
||||
type encodeFuncs struct {
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
izf isZeroFunc
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -31,10 +32,12 @@ type specialType int
|
||||
const (
|
||||
specialTypeNone specialType = iota
|
||||
specialTypeUnmarshalerIface
|
||||
specialTypeUnexportedUnmarshalerIface
|
||||
specialTypeEmptyIface
|
||||
specialTypeIface
|
||||
specialTypeTag
|
||||
specialTypeTime
|
||||
specialTypeJSONUnmarshalerIface
|
||||
)
|
||||
|
||||
type typeInfo struct {
|
||||
@@ -50,7 +53,7 @@ type typeInfo struct {
|
||||
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
for t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
@@ -69,8 +72,12 @@ func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo.spclType = specialTypeTag
|
||||
} else if t == typeTime {
|
||||
tInfo.spclType = specialTypeTime
|
||||
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||
} else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnexportedUnmarshalerIface
|
||||
} else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnmarshalerIface
|
||||
} else if reflect.PointerTo(t).Implements(typeJSONUnmarshaler) {
|
||||
tInfo.spclType = specialTypeJSONUnmarshalerIface
|
||||
}
|
||||
|
||||
switch k {
|
||||
@@ -237,7 +244,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
||||
e := getEncodeBuffer()
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
err = &UnsupportedTypeError{t}
|
||||
break
|
||||
@@ -321,7 +328,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
||||
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
@@ -337,14 +344,14 @@ func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructT
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
|
||||
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||
fs := v.(encodeFuncs)
|
||||
return fs.ef, fs.ief
|
||||
return fs.ef, fs.ief, fs.izf
|
||||
}
|
||||
ef, ief := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||
return ef, ief
|
||||
ef, ief, izf := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
|
||||
return ef, ief, izf
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
|
||||
9
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
@@ -5,6 +5,7 @@ package cbor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
@@ -180,3 +181,11 @@ func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Transcoder is a scheme for transcoding a single CBOR encoded data item to or from a different
|
||||
// data format.
|
||||
type Transcoder interface {
|
||||
// Transcode reads the data item in its source format from a Reader and writes a
|
||||
// corresponding representation in its destination format to a Writer.
|
||||
Transcode(dst io.Writer, src io.Reader) error
|
||||
}
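
The Transcoder interface added to common.go in this hunk only requires a single Transcode method that reads from an io.Reader and writes to an io.Writer. A hypothetical no-op implementation that satisfies the same method set, shown purely for illustration and not part of this change:

```go
// Sketch: a do-nothing transcoder that copies bytes unchanged; a real
// implementation would convert between CBOR and another format here.
package main

import (
	"bytes"
	"fmt"
	"io"
)

// copyTranscoder has the same method set as the cbor Transcoder interface.
type copyTranscoder struct{}

func (copyTranscoder) Transcode(dst io.Writer, src io.Reader) error {
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	var out bytes.Buffer
	t := copyTranscoder{}
	if err := t.Transcode(&out, bytes.NewReader([]byte{0xf6})); err != nil { // 0xf6 = CBOR null
		panic(err)
	}
	fmt.Printf("%x\n", out.Bytes())
}
```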
|
||||
|
||||
425
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
@@ -4,6 +4,7 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
@@ -94,7 +95,7 @@ import (
|
||||
//
|
||||
// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
|
||||
// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
|
||||
// used to mean "not present", unmarshalling CBOR null and undefined value
|
||||
// used to mean "not present", unmarshaling CBOR null and undefined value
|
||||
// into any other Go type has no effect and returns no error.
|
||||
//
|
||||
// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
|
||||
@@ -104,7 +105,7 @@ import (
|
||||
// if there are any remaining bytes following the first valid CBOR data item.
|
||||
// See UnmarshalFirst, if you want to unmarshal only the first
|
||||
// CBOR data item without ExtraneousDataError caused by remaining bytes.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
func Unmarshal(data []byte, v any) error {
|
||||
return defaultDecMode.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
@@ -114,7 +115,7 @@ func Unmarshal(data []byte, v interface{}) error {
|
||||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
|
||||
func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
|
||||
return defaultDecMode.UnmarshalFirst(data, v)
|
||||
}
|
||||
|
||||
@@ -151,6 +152,10 @@ type Unmarshaler interface {
|
||||
UnmarshalCBOR([]byte) error
|
||||
}
|
||||
|
||||
type unmarshaler interface {
|
||||
unmarshalCBOR([]byte) error
|
||||
}
|
||||
|
||||
// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
|
||||
type InvalidUnmarshalError struct {
|
||||
s string
|
||||
@@ -193,12 +198,12 @@ func (e *InvalidMapKeyTypeError) Error() string {
|
||||
|
||||
// DupMapKeyError describes detected duplicate map key in CBOR map.
|
||||
type DupMapKeyError struct {
|
||||
Key interface{}
|
||||
Key any
|
||||
Index int
|
||||
}
|
||||
|
||||
func (e *DupMapKeyError) Error() string {
|
||||
return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index)
|
||||
return fmt.Sprintf("cbor: found duplicate map key %#v at map element index %d", e.Key, e.Index)
|
||||
}
|
||||
|
||||
// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
|
||||
@@ -383,7 +388,7 @@ const (
|
||||
// - return UnmarshalTypeError if value doesn't fit into int64
|
||||
IntDecConvertSignedOrFail
|
||||
|
||||
// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
|
||||
// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
|
||||
// It makes CBOR integers (major type 0 and 1) decode to:
|
||||
// - int64 if value fits
|
||||
// - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
|
||||
@@ -489,11 +494,11 @@ type BigIntDecMode int
|
||||
|
||||
const (
|
||||
// BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
|
||||
// when unmarshalling into a Go interface{}.
|
||||
// when unmarshaling into a Go interface{}.
|
||||
BigIntDecodeValue BigIntDecMode = iota
|
||||
|
||||
// BigIntDecodePointer makes CBOR bignum decode to *big.Int when
|
||||
// unmarshalling into a Go interface{}.
|
||||
// unmarshaling into a Go interface{}.
|
||||
BigIntDecodePointer
|
||||
|
||||
maxBigIntDecMode
|
||||
@@ -745,6 +750,25 @@ func (bum BinaryUnmarshalerMode) valid() bool {
|
||||
return bum >= 0 && bum < maxBinaryUnmarshalerMode
|
||||
}
|
||||
|
||||
// TextUnmarshalerMode specifies how to decode into types that implement
|
||||
// encoding.TextUnmarshaler.
|
||||
type TextUnmarshalerMode int
|
||||
|
||||
const (
|
||||
// TextUnmarshalerNone does not recognize TextUnmarshaler implementations during decode.
|
||||
TextUnmarshalerNone TextUnmarshalerMode = iota
|
||||
|
||||
// TextUnmarshalerTextString will invoke UnmarshalText on the contents of a CBOR text
|
||||
// string when decoding into a value that implements TextUnmarshaler.
|
||||
TextUnmarshalerTextString
|
||||
|
||||
maxTextUnmarshalerMode
|
||||
)
|
||||
|
||||
func (tum TextUnmarshalerMode) valid() bool {
|
||||
return tum >= 0 && tum < maxTextUnmarshalerMode
|
||||
}
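A sketch of the decode-side effect, using a made-up type whose UnmarshalText normalizes its input:

    type color string

    func (c *color) UnmarshalText(text []byte) error {
        *c = color(strings.ToUpper(string(text)))
        return nil
    }

    dm, _ := cbor.DecOptions{TextUnmarshaler: cbor.TextUnmarshalerTextString}.DecMode()
    var c color
    _ = dm.Unmarshal([]byte{0x63, 'r', 'e', 'd'}, &c) // 0x63 = text string of length 3
    // c is "RED": the text string was routed through UnmarshalText.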
|
||||
|
||||
// DecOptions specifies decoding options.
|
||||
type DecOptions struct {
|
||||
// DupMapKey specifies whether to enforce duplicate map key.
|
||||
@@ -793,7 +817,7 @@ type DecOptions struct {
|
||||
// TagsMd specifies whether to allow CBOR tags (major type 6).
|
||||
TagsMd TagsMode
|
||||
|
||||
// IntDec specifies which Go integer type (int64 or uint64) to use
|
||||
// IntDec specifies which Go integer type (int64, uint64, or [big.Int]) to use
|
||||
// when decoding CBOR int (major type 0 and 1) to Go interface{}.
|
||||
IntDec IntDecMode
|
||||
|
||||
@@ -807,7 +831,7 @@ type DecOptions struct {
|
||||
ExtraReturnErrors ExtraDecErrorCond
|
||||
|
||||
// DefaultMapType specifies Go map type to create and decode to
|
||||
// when unmarshalling CBOR into an empty interface value.
|
||||
// when unmarshaling CBOR into an empty interface value.
|
||||
// By default, unmarshal uses map[interface{}]interface{}.
|
||||
DefaultMapType reflect.Type
|
||||
|
||||
@@ -879,6 +903,15 @@ type DecOptions struct {
|
||||
// BinaryUnmarshaler specifies how to decode into types that implement
|
||||
// encoding.BinaryUnmarshaler.
|
||||
BinaryUnmarshaler BinaryUnmarshalerMode
|
||||
|
||||
// TextUnmarshaler specifies how to decode into types that implement
|
||||
// encoding.TextUnmarshaler.
|
||||
TextUnmarshaler TextUnmarshalerMode
|
||||
|
||||
// JSONUnmarshalerTranscoder sets the transcoding scheme used to unmarshal types that
|
||||
// implement json.Unmarshaler but do not also implement cbor.Unmarshaler. If nil, decoding
|
||||
// behavior is not influenced by whether or not a type implements json.Unmarshaler.
|
||||
JSONUnmarshalerTranscoder Transcoder
|
||||
}
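Wiring both new options into a decoding mode might look like this sketch, where jsonTranscoder stands for any Transcoder value (for example, one modeled on the earlier sketch):

    dm, err := cbor.DecOptions{
        TextUnmarshaler:           cbor.TextUnmarshalerTextString,
        JSONUnmarshalerTranscoder: jsonTranscoder, // assumed Transcoder value
    }.DecMode()
    if err != nil {
        // invalid option values are reported here
    }
    _ = dm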
|
||||
|
||||
// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
|
||||
@@ -1091,33 +1124,39 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore
|
||||
return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
|
||||
}
|
||||
|
||||
if !opts.TextUnmarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid TextUnmarshaler " + strconv.Itoa(int(opts.TextUnmarshaler)))
|
||||
}
|
||||
|
||||
dm := decMode{
|
||||
dupMapKey: opts.DupMapKey,
|
||||
timeTag: opts.TimeTag,
|
||||
maxNestedLevels: opts.MaxNestedLevels,
|
||||
maxArrayElements: opts.MaxArrayElements,
|
||||
maxMapPairs: opts.MaxMapPairs,
|
||||
indefLength: opts.IndefLength,
|
||||
tagsMd: opts.TagsMd,
|
||||
intDec: opts.IntDec,
|
||||
mapKeyByteString: opts.MapKeyByteString,
|
||||
extraReturnErrors: opts.ExtraReturnErrors,
|
||||
defaultMapType: opts.DefaultMapType,
|
||||
utf8: opts.UTF8,
|
||||
fieldNameMatching: opts.FieldNameMatching,
|
||||
bigIntDec: opts.BigIntDec,
|
||||
defaultByteStringType: opts.DefaultByteStringType,
|
||||
byteStringToString: opts.ByteStringToString,
|
||||
fieldNameByteString: opts.FieldNameByteString,
|
||||
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
|
||||
timeTagToAny: opts.TimeTagToAny,
|
||||
simpleValues: simpleValues,
|
||||
nanDec: opts.NaN,
|
||||
infDec: opts.Inf,
|
||||
byteStringToTime: opts.ByteStringToTime,
|
||||
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
|
||||
bignumTag: opts.BignumTag,
|
||||
binaryUnmarshaler: opts.BinaryUnmarshaler,
|
||||
dupMapKey: opts.DupMapKey,
|
||||
timeTag: opts.TimeTag,
|
||||
maxNestedLevels: opts.MaxNestedLevels,
|
||||
maxArrayElements: opts.MaxArrayElements,
|
||||
maxMapPairs: opts.MaxMapPairs,
|
||||
indefLength: opts.IndefLength,
|
||||
tagsMd: opts.TagsMd,
|
||||
intDec: opts.IntDec,
|
||||
mapKeyByteString: opts.MapKeyByteString,
|
||||
extraReturnErrors: opts.ExtraReturnErrors,
|
||||
defaultMapType: opts.DefaultMapType,
|
||||
utf8: opts.UTF8,
|
||||
fieldNameMatching: opts.FieldNameMatching,
|
||||
bigIntDec: opts.BigIntDec,
|
||||
defaultByteStringType: opts.DefaultByteStringType,
|
||||
byteStringToString: opts.ByteStringToString,
|
||||
fieldNameByteString: opts.FieldNameByteString,
|
||||
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
|
||||
timeTagToAny: opts.TimeTagToAny,
|
||||
simpleValues: simpleValues,
|
||||
nanDec: opts.NaN,
|
||||
infDec: opts.Inf,
|
||||
byteStringToTime: opts.ByteStringToTime,
|
||||
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
|
||||
bignumTag: opts.BignumTag,
|
||||
binaryUnmarshaler: opts.BinaryUnmarshaler,
|
||||
textUnmarshaler: opts.TextUnmarshaler,
|
||||
jsonUnmarshalerTranscoder: opts.JSONUnmarshalerTranscoder,
|
||||
}
|
||||
|
||||
return &dm, nil
|
||||
@@ -1130,7 +1169,7 @@ type DecMode interface {
|
||||
// Unmarshal returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Unmarshal(data []byte, v any) error
|
||||
|
||||
// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
|
||||
// using the decoding mode. Any remaining bytes are returned in rest.
|
||||
@@ -1138,7 +1177,7 @@ type DecMode interface {
|
||||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error)
|
||||
UnmarshalFirst(data []byte, v any) (rest []byte, err error)
|
||||
|
||||
// Valid checks whether data is a well-formed encoded CBOR data item and
|
||||
// that it complies with configurable restrictions such as MaxNestedLevels,
|
||||
@@ -1170,33 +1209,35 @@ type DecMode interface {
|
||||
}
|
||||
|
||||
type decMode struct {
|
||||
tags tagProvider
|
||||
dupMapKey DupMapKeyMode
|
||||
timeTag DecTagMode
|
||||
maxNestedLevels int
|
||||
maxArrayElements int
|
||||
maxMapPairs int
|
||||
indefLength IndefLengthMode
|
||||
tagsMd TagsMode
|
||||
intDec IntDecMode
|
||||
mapKeyByteString MapKeyByteStringMode
|
||||
extraReturnErrors ExtraDecErrorCond
|
||||
defaultMapType reflect.Type
|
||||
utf8 UTF8Mode
|
||||
fieldNameMatching FieldNameMatchingMode
|
||||
bigIntDec BigIntDecMode
|
||||
defaultByteStringType reflect.Type
|
||||
byteStringToString ByteStringToStringMode
|
||||
fieldNameByteString FieldNameByteStringMode
|
||||
unrecognizedTagToAny UnrecognizedTagToAnyMode
|
||||
timeTagToAny TimeTagToAnyMode
|
||||
simpleValues *SimpleValueRegistry
|
||||
nanDec NaNMode
|
||||
infDec InfMode
|
||||
byteStringToTime ByteStringToTimeMode
|
||||
byteStringExpectedFormat ByteStringExpectedFormatMode
|
||||
bignumTag BignumTagMode
|
||||
binaryUnmarshaler BinaryUnmarshalerMode
|
||||
tags tagProvider
|
||||
dupMapKey DupMapKeyMode
|
||||
timeTag DecTagMode
|
||||
maxNestedLevels int
|
||||
maxArrayElements int
|
||||
maxMapPairs int
|
||||
indefLength IndefLengthMode
|
||||
tagsMd TagsMode
|
||||
intDec IntDecMode
|
||||
mapKeyByteString MapKeyByteStringMode
|
||||
extraReturnErrors ExtraDecErrorCond
|
||||
defaultMapType reflect.Type
|
||||
utf8 UTF8Mode
|
||||
fieldNameMatching FieldNameMatchingMode
|
||||
bigIntDec BigIntDecMode
|
||||
defaultByteStringType reflect.Type
|
||||
byteStringToString ByteStringToStringMode
|
||||
fieldNameByteString FieldNameByteStringMode
|
||||
unrecognizedTagToAny UnrecognizedTagToAnyMode
|
||||
timeTagToAny TimeTagToAnyMode
|
||||
simpleValues *SimpleValueRegistry
|
||||
nanDec NaNMode
|
||||
infDec InfMode
|
||||
byteStringToTime ByteStringToTimeMode
|
||||
byteStringExpectedFormat ByteStringExpectedFormatMode
|
||||
bignumTag BignumTagMode
|
||||
binaryUnmarshaler BinaryUnmarshalerMode
|
||||
textUnmarshaler TextUnmarshalerMode
|
||||
jsonUnmarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
var defaultDecMode, _ = DecOptions{}.decMode()
|
||||
@@ -1211,32 +1252,34 @@ func (dm *decMode) DecOptions() DecOptions {
|
||||
}
|
||||
|
||||
return DecOptions{
|
||||
DupMapKey: dm.dupMapKey,
|
||||
TimeTag: dm.timeTag,
|
||||
MaxNestedLevels: dm.maxNestedLevels,
|
||||
MaxArrayElements: dm.maxArrayElements,
|
||||
MaxMapPairs: dm.maxMapPairs,
|
||||
IndefLength: dm.indefLength,
|
||||
TagsMd: dm.tagsMd,
|
||||
IntDec: dm.intDec,
|
||||
MapKeyByteString: dm.mapKeyByteString,
|
||||
ExtraReturnErrors: dm.extraReturnErrors,
|
||||
DefaultMapType: dm.defaultMapType,
|
||||
UTF8: dm.utf8,
|
||||
FieldNameMatching: dm.fieldNameMatching,
|
||||
BigIntDec: dm.bigIntDec,
|
||||
DefaultByteStringType: dm.defaultByteStringType,
|
||||
ByteStringToString: dm.byteStringToString,
|
||||
FieldNameByteString: dm.fieldNameByteString,
|
||||
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
|
||||
TimeTagToAny: dm.timeTagToAny,
|
||||
SimpleValues: simpleValues,
|
||||
NaN: dm.nanDec,
|
||||
Inf: dm.infDec,
|
||||
ByteStringToTime: dm.byteStringToTime,
|
||||
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
|
||||
BignumTag: dm.bignumTag,
|
||||
BinaryUnmarshaler: dm.binaryUnmarshaler,
|
||||
DupMapKey: dm.dupMapKey,
|
||||
TimeTag: dm.timeTag,
|
||||
MaxNestedLevels: dm.maxNestedLevels,
|
||||
MaxArrayElements: dm.maxArrayElements,
|
||||
MaxMapPairs: dm.maxMapPairs,
|
||||
IndefLength: dm.indefLength,
|
||||
TagsMd: dm.tagsMd,
|
||||
IntDec: dm.intDec,
|
||||
MapKeyByteString: dm.mapKeyByteString,
|
||||
ExtraReturnErrors: dm.extraReturnErrors,
|
||||
DefaultMapType: dm.defaultMapType,
|
||||
UTF8: dm.utf8,
|
||||
FieldNameMatching: dm.fieldNameMatching,
|
||||
BigIntDec: dm.bigIntDec,
|
||||
DefaultByteStringType: dm.defaultByteStringType,
|
||||
ByteStringToString: dm.byteStringToString,
|
||||
FieldNameByteString: dm.fieldNameByteString,
|
||||
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
|
||||
TimeTagToAny: dm.timeTagToAny,
|
||||
SimpleValues: simpleValues,
|
||||
NaN: dm.nanDec,
|
||||
Inf: dm.infDec,
|
||||
ByteStringToTime: dm.byteStringToTime,
|
||||
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
|
||||
BignumTag: dm.bignumTag,
|
||||
BinaryUnmarshaler: dm.binaryUnmarshaler,
|
||||
TextUnmarshaler: dm.textUnmarshaler,
|
||||
JSONUnmarshalerTranscoder: dm.jsonUnmarshalerTranscoder,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1245,7 +1288,7 @@ func (dm *decMode) DecOptions() DecOptions {
|
||||
// Unmarshal returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
|
||||
func (dm *decMode) Unmarshal(data []byte, v any) error {
|
||||
d := decoder{data: data, dm: dm}
|
||||
|
||||
// Check well-formedness.
|
||||
@@ -1265,7 +1308,7 @@ func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
|
||||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
|
||||
func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
|
||||
d := decoder{data: data, dm: dm}
|
||||
|
||||
// check well-formedness.
|
||||
@@ -1341,13 +1384,13 @@ type decoder struct {
|
||||
// If CBOR data item fails to be decoded into v,
|
||||
// error is returned and offset is moved to the next CBOR data item.
|
||||
// Precondition: d.data contains at least one well-formed CBOR data item.
|
||||
func (d *decoder) value(v interface{}) error {
|
||||
func (d *decoder) value(v any) error {
|
||||
// v can't be nil, non-pointer, or nil pointer value.
|
||||
if v == nil {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
if rv.Kind() != reflect.Pointer {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
|
||||
} else if rv.IsNil() {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
|
||||
@@ -1361,9 +1404,9 @@ func (d *decoder) value(v interface{}) error {
|
||||
func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
|
||||
|
||||
// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Ptr {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer {
|
||||
d.skip()
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
v.SetZero()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1387,7 +1430,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
|
||||
if registeredType != nil {
|
||||
if registeredType.Implements(tInfo.nonPtrType) ||
|
||||
reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) {
|
||||
reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
|
||||
v.Set(reflect.New(registeredType))
|
||||
v = v.Elem()
|
||||
tInfo = getTypeInfo(registeredType)
|
||||
@@ -1399,7 +1442,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
|
||||
// Create new value for the pointer v to point to.
|
||||
// At this point, CBOR value is not nil/undefined if v is a pointer.
|
||||
for v.Kind() == reflect.Ptr {
|
||||
for v.Kind() == reflect.Pointer {
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
d.skip()
|
||||
@@ -1460,6 +1503,17 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
|
||||
case specialTypeUnmarshalerIface:
|
||||
return d.parseToUnmarshaler(v)
|
||||
|
||||
case specialTypeUnexportedUnmarshalerIface:
|
||||
return d.parseToUnexportedUnmarshaler(v)
|
||||
|
||||
case specialTypeJSONUnmarshalerIface:
|
||||
// This special type implies that the type does not also implement
|
||||
// cbor.Unmarshaler.
|
||||
if d.dm.jsonUnmarshalerTranscoder == nil {
|
||||
break
|
||||
}
|
||||
return d.parseToJSONUnmarshaler(v)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1516,14 +1570,14 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
return err
|
||||
}
|
||||
copied = copied || converted
|
||||
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
|
||||
case cborTypeTextString:
|
||||
b, err := d.parseTextString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fillTextString(t, b, v)
|
||||
return fillTextString(t, b, v, d.dm.textUnmarshaler)
|
||||
|
||||
case cborTypePrimitives:
|
||||
_, ai, val := d.getHead()
|
||||
@@ -1575,7 +1629,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
return nil
|
||||
}
|
||||
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
}
|
||||
if bi.IsUint64() {
|
||||
return fillPositiveInt(t, bi.Uint64(), v)
|
||||
@@ -1598,7 +1652,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
||||
return nil
|
||||
}
|
||||
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
}
|
||||
if bi.IsInt64() {
|
||||
return fillNegativeInt(t, bi.Int64(), v)
|
||||
@@ -1788,12 +1842,12 @@ func (d *decoder) parseToTime() (time.Time, bool, error) {
|
||||
// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Ptr && v.CanAddr() {
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(Unmarshaler); ok {
|
||||
@@ -1805,9 +1859,55 @@ func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
|
||||
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
|
||||
}
|
||||
|
||||
// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(unmarshaler); ok {
|
||||
start := d.off
|
||||
d.skip()
|
||||
return u.unmarshalCBOR(d.data[start:d.off])
|
||||
}
|
||||
d.skip()
|
||||
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
|
||||
}
|
||||
|
||||
// parseToJSONUnmarshaler parses CBOR data to be transcoded to JSON and passed to the value's
|
||||
// implementation of the json.Unmarshaler interface. It assumes data is well-formed, and does not
|
||||
// perform bounds checking.
|
||||
func (d *decoder) parseToJSONUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(jsonUnmarshaler); ok {
|
||||
start := d.off
|
||||
d.skip()
|
||||
e := getEncodeBuffer()
|
||||
defer putEncodeBuffer(e)
|
||||
if err := d.dm.jsonUnmarshalerTranscoder.Transcode(e, bytes.NewReader(d.data[start:d.off])); err != nil {
|
||||
return &TranscodeError{err: err, rtype: v.Type(), sourceFormat: "cbor", targetFormat: "json"}
|
||||
}
|
||||
return u.UnmarshalJSON(e.Bytes())
|
||||
}
|
||||
d.skip()
|
||||
return errors.New("cbor: failed to assert " + v.Type().String() + " as json.Unmarshaler")
|
||||
}
|
||||
|
||||
// parse parses CBOR data and returns value in default Go type.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo
|
||||
func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
|
||||
// Strip self-described CBOR tag number.
|
||||
if skipSelfDescribedTag {
|
||||
for d.nextCBORType() == cborTypeTag {
|
||||
@@ -2224,15 +2324,15 @@ func (d *decoder) parseTextString() ([]byte, error) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) parseArray() ([]interface{}, error) {
|
||||
func (d *decoder) parseArray() ([]any, error) {
|
||||
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
|
||||
hasSize := !indefiniteLength
|
||||
count := int(val)
|
||||
if !hasSize {
|
||||
count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
|
||||
}
|
||||
v := make([]interface{}, count)
|
||||
var e interface{}
|
||||
v := make([]any, count)
|
||||
var e any
|
||||
var err, lastErr error
|
||||
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
|
||||
if e, lastErr = d.parse(true); lastErr != nil {
|
||||
@@ -2290,20 +2390,19 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
|
||||
}
|
||||
// Set remaining Go array elements to zero values.
|
||||
if gi < vLen {
|
||||
zeroV := reflect.Zero(tInfo.elemTypeInfo.typ)
|
||||
for ; gi < vLen; gi++ {
|
||||
v.Index(gi).Set(zeroV)
|
||||
v.Index(gi).SetZero()
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *decoder) parseMap() (interface{}, error) {
|
||||
func (d *decoder) parseMap() (any, error) {
|
||||
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
|
||||
hasSize := !indefiniteLength
|
||||
count := int(val)
|
||||
m := make(map[interface{}]interface{})
|
||||
var k, e interface{}
|
||||
m := make(map[any]any)
|
||||
var k, e any
|
||||
var err, lastErr error
|
||||
keyCount := 0
|
||||
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
|
||||
@@ -2376,13 +2475,13 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
||||
}
|
||||
keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
|
||||
reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
|
||||
var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value
|
||||
var keyValue, eleValue reflect.Value
|
||||
keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
|
||||
var err, lastErr error
|
||||
keyCount := v.Len()
|
||||
var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key.
|
||||
var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
|
||||
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
|
||||
existingKeys = make(map[interface{}]bool, keyCount)
|
||||
existingKeys = make(map[any]bool, keyCount)
|
||||
if keyCount > 0 {
|
||||
vKeys := v.MapKeys()
|
||||
for i := 0; i < len(vKeys); i++ {
|
||||
@@ -2395,10 +2494,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
||||
if !keyValue.IsValid() {
|
||||
keyValue = reflect.New(keyType).Elem()
|
||||
} else if !reuseKey {
|
||||
if !zeroKeyValue.IsValid() {
|
||||
zeroKeyValue = reflect.Zero(keyType)
|
||||
}
|
||||
keyValue.Set(zeroKeyValue)
|
||||
keyValue.SetZero()
|
||||
}
|
||||
if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
|
||||
if err == nil {
|
||||
@@ -2413,7 +2509,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
||||
if !isHashableValue(keyValue.Elem()) {
|
||||
var converted bool
|
||||
if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
|
||||
var k interface{}
|
||||
var k any
|
||||
k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
|
||||
if converted {
|
||||
keyValue.Set(reflect.ValueOf(k))
|
||||
@@ -2433,10 +2529,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
||||
if !eleValue.IsValid() {
|
||||
eleValue = reflect.New(eleType).Elem()
|
||||
} else if !reuseEle {
|
||||
if !zeroEleValue.IsValid() {
|
||||
zeroEleValue = reflect.Zero(eleType)
|
||||
}
|
||||
eleValue.Set(zeroEleValue)
|
||||
eleValue.SetZero()
|
||||
}
|
||||
if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
|
||||
if err == nil {
|
||||
@@ -2584,7 +2677,7 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n
|
||||
|
||||
// Keeps track of CBOR map keys to detect duplicate map key
|
||||
keyCount := 0
|
||||
var mapKeys map[interface{}]struct{}
|
||||
var mapKeys map[any]struct{}
|
||||
|
||||
errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
|
||||
|
||||
@@ -2594,7 +2687,7 @@ MapEntryLoop:
|
||||
|
||||
// If duplicate field detection is enabled and the key at index j did not match any
|
||||
// field, k will hold the map key.
|
||||
var k interface{}
|
||||
var k any
|
||||
|
||||
t := d.nextCBORType()
|
||||
if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
|
||||
@@ -2764,7 +2857,7 @@ MapEntryLoop:
|
||||
// check is never reached.
|
||||
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
|
||||
if mapKeys == nil {
|
||||
mapKeys = make(map[interface{}]struct{}, 1)
|
||||
mapKeys = make(map[any]struct{}, 1)
|
||||
}
|
||||
mapKeys[k] = struct{}{}
|
||||
newKeyCount := len(mapKeys)
|
||||
@@ -2968,20 +3061,25 @@ func (d *decoder) nextCBORNil() bool {
|
||||
return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7
|
||||
}
|
||||
|
||||
type jsonUnmarshaler interface{ UnmarshalJSON([]byte) error }
|
||||
|
||||
var (
|
||||
typeIntf = reflect.TypeOf([]interface{}(nil)).Elem()
|
||||
typeTime = reflect.TypeOf(time.Time{})
|
||||
typeBigInt = reflect.TypeOf(big.Int{})
|
||||
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
|
||||
typeString = reflect.TypeOf("")
|
||||
typeByteSlice = reflect.TypeOf([]byte(nil))
|
||||
typeIntf = reflect.TypeOf([]any(nil)).Elem()
|
||||
typeTime = reflect.TypeOf(time.Time{})
|
||||
typeBigInt = reflect.TypeOf(big.Int{})
|
||||
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
|
||||
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
|
||||
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
typeJSONUnmarshaler = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
|
||||
typeString = reflect.TypeOf("")
|
||||
typeByteSlice = reflect.TypeOf([]byte(nil))
|
||||
)
|
||||
|
||||
func fillNil(_ cborType, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr:
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Pointer:
|
||||
v.SetZero()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
@@ -3082,8 +3180,8 @@ func fillFloat(t cborType, val float64, v reflect.Value) error {
|
||||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error {
|
||||
if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) {
|
||||
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode, tum TextUnmarshalerMode) error {
|
||||
if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
|
||||
@@ -3095,9 +3193,26 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
||||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
if bsts != ByteStringToStringForbidden {
|
||||
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
// The contract of TextUnmarshaler forbids retaining the input
|
||||
// bytes, so no copying is required even if val is shared.
|
||||
if err := u.UnmarshalText(val); err != nil {
|
||||
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
|
||||
src := val
|
||||
@@ -3117,9 +3232,8 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
||||
}
|
||||
// Set remaining Go array elements to zero values.
|
||||
if i < vLen {
|
||||
zeroV := reflect.Zero(reflect.TypeOf(byte(0)))
|
||||
for ; i < vLen; i++ {
|
||||
v.Index(i).Set(zeroV)
|
||||
v.Index(i).SetZero()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -3127,11 +3241,28 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
||||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
func fillTextString(t cborType, val []byte, v reflect.Value) error {
|
||||
func fillTextString(t cborType, val []byte, v reflect.Value, tum TextUnmarshalerMode) error {
|
||||
// Check if the value implements TextUnmarshaler and the mode allows it
|
||||
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
// The contract of TextUnmarshaler forbids retaining the input
|
||||
// bytes, so no copying is required even if val is shared.
|
||||
if err := u.UnmarshalText(val); err != nil {
|
||||
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
}
|
||||
|
||||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
@@ -3172,7 +3303,7 @@ func isHashableValue(rv reflect.Value) bool {
|
||||
// This function also handles nested tags.
|
||||
// CBOR data is already verified to be well-formed before this function is used,
|
||||
// so the recursion won't exceed max nested levels.
|
||||
func convertByteSliceToByteString(v interface{}) (interface{}, bool) {
|
||||
func convertByteSliceToByteString(v any) (any, bool) {
|
||||
switch v := v.(type) {
|
||||
case []byte:
|
||||
return ByteString(v), true
51
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
@@ -2,15 +2,15 @@
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
/*
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
|
||||
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
|
||||
Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
|
||||
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
|
||||
|
||||
Encoding options allow "preferred serialization" by encoding integers and floats
|
||||
to their smallest forms (e.g. float16) when values fit.
|
||||
|
||||
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
|
||||
and easier to use with structs.
|
||||
Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
|
||||
and reduce programming effort.
|
||||
|
||||
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
|
||||
"keyasint" makes a field encode to an element of CBOR map with specified int key.
|
||||
@@ -23,11 +23,19 @@ The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
|
||||
|
||||
Function signatures identical to encoding/json include:
|
||||
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
|
||||
|
||||
Standard interfaces include:
|
||||
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
|
||||
|
||||
Diagnostic functions translate CBOR data item into Diagnostic Notation:
|
||||
|
||||
Diagnose, DiagnoseFirst
|
||||
|
||||
Functions that simplify using CBOR Sequences (RFC 8742) include:
|
||||
|
||||
UnmarshalFirst
|
||||
|
||||
Custom encoding and decoding is possible by implementing standard interfaces for
|
||||
user-defined Go types.
|
||||
@@ -50,19 +58,19 @@ Modes are intended to be reused and are safe for concurrent use.
|
||||
|
||||
EncMode and DecMode Interfaces
|
||||
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions // returns copy of options
|
||||
}
|
||||
}
|
||||
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
NewDecoder(r io.Reader) *Decoder
|
||||
DecOptions() DecOptions // returns copy of options
|
||||
}
|
||||
}
|
||||
|
||||
Using Default Encoding Mode
|
||||
|
||||
@@ -78,6 +86,16 @@ Using Default Decoding Mode
|
||||
decoder := cbor.NewDecoder(r)
|
||||
err = decoder.Decode(&v)
|
||||
|
||||
Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
|
||||
|
||||
// Decode the first CBOR data item and return remaining bytes:
|
||||
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||
|
||||
Using Extended Diagnostic Notation (EDN) to represent CBOR data
|
||||
|
||||
// Translate the first CBOR data item into text and return remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to text
|
||||
|
||||
Creating and Using Encoding Modes
|
||||
|
||||
// Create EncOptions using either struct literal or a function.
|
||||
@@ -111,15 +129,20 @@ Decoding Options: https://github.com/fxamacker/cbor#decoding-options
|
||||
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
|
||||
If both struct tags are specified then `cbor` is used.
|
||||
|
||||
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
|
||||
Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
|
||||
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
|
||||
|
||||
The "omitzero" option omits zero values from encoding, matching
|
||||
[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
|
||||
When specified in the `cbor` tag, the option is always honored.
|
||||
When specified in the `json` tag, the option is honored when building with Go 1.24+.
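An illustrative struct (field names are made up) showing the difference between the two options:

    type event struct {
        Count int       `cbor:"count,omitempty"` // omitted when Count is 0
        At    time.Time `cbor:"at,omitzero"`     // omitted when At is the zero time.Time
    }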
|
||||
|
||||
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
|
||||
makes struct fields encode to elements of CBOR map with int keys.
|
||||
|
||||
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
|
||||
|
||||
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
|
||||
# Tests and Fuzzing
|
||||
442
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
@@ -58,8 +58,10 @@ import (
|
||||
//
|
||||
// Marshal supports format string stored under the "cbor" key in the struct
|
||||
// field's tag. CBOR format string can specify the name of the field,
|
||||
// "omitempty" and "keyasint" options, and special case "-" for field omission.
|
||||
// If "cbor" key is absent, Marshal uses "json" key.
|
||||
// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
|
||||
// field omission. If "cbor" key is absent, Marshal uses "json" key.
|
||||
// When using the "json" key, the "omitzero" option is honored when building
|
||||
// with Go 1.24+ to match stdlib encoding/json behavior.
|
||||
//
|
||||
// Struct field name is treated as integer if it has "keyasint" option in
|
||||
// its format string. The format string must specify an integer as its
|
||||
@@ -67,8 +69,8 @@ import (
|
||||
//
|
||||
// Special struct field "_" is used to specify struct level options, such as
|
||||
// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
|
||||
// "omitempty" is disabled by "toarray" to ensure that the same number
|
||||
// of elements are encoded every time.
|
||||
// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
|
||||
// same number of elements are encoded every time.
|
||||
//
|
||||
// Anonymous struct fields are marshaled as if their exported fields
|
||||
// were fields in the outer struct. Marshal follows the same struct fields
|
||||
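An illustrative struct using the tag format string described above (made-up field names, CWT-style integer keys):

    type claims struct {
        Issuer  string `cbor:"1,keyasint,omitempty"`
        Subject string `cbor:"2,keyasint,omitempty"`
        Ignored string `cbor:"-"` // never encoded
    }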
@@ -92,7 +94,7 @@ import (
|
||||
//
|
||||
// Values of other types cannot be encoded in CBOR. Attempting
|
||||
// to encode such a value causes Marshal to return an UnsupportedTypeError.
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
return defaultEncMode.Marshal(v)
|
||||
}
|
||||
|
||||
@@ -103,7 +105,7 @@ func Marshal(v interface{}) ([]byte, error) {
|
||||
// partially encoded data if error is returned.
|
||||
//
|
||||
// See Marshal for more details.
|
||||
func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
|
||||
func MarshalToBuffer(v any, buf *bytes.Buffer) error {
|
||||
return defaultEncMode.MarshalToBuffer(v, buf)
|
||||
}
|
||||
|
||||
@@ -130,6 +132,20 @@ func (e *MarshalerError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
type TranscodeError struct {
|
||||
err error
|
||||
rtype reflect.Type
|
||||
sourceFormat, targetFormat string
|
||||
}
|
||||
|
||||
func (e TranscodeError) Error() string {
|
||||
return "cbor: cannot transcode from " + e.sourceFormat + " to " + e.targetFormat + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (e TranscodeError) Unwrap() error {
|
||||
return e.err
|
||||
}
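Callers can detect a transcoding failure with errors.As, e.g. (a sketch; the error is returned as a pointer):

    var te *cbor.TranscodeError
    if errors.As(err, &te) {
        // te.Unwrap() returns the error reported by the configured Transcoder.
        log.Printf("transcode failed: %v", te)
    }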
|
||||
|
||||
// UnsupportedTypeError is returned by Marshal when attempting to encode value
|
||||
// of an unsupported type.
|
||||
type UnsupportedTypeError struct {
|
||||
@@ -291,24 +307,51 @@ func (icm InfConvertMode) valid() bool {
|
||||
return icm >= 0 && icm < maxInfConvert
|
||||
}
|
||||
|
||||
// TimeMode specifies how to encode time.Time values.
|
||||
// TimeMode specifies how to encode time.Time values in compliance with RFC 8949 (CBOR):
|
||||
// - Section 3.4.1: Standard Date/Time String
|
||||
// - Section 3.4.2: Epoch-Based Date/Time
|
||||
// For more info, see:
|
||||
// - https://www.rfc-editor.org/rfc/rfc8949.html
|
||||
// NOTE: User applications that prefer to encode time with fractional seconds to an integer
|
||||
// (instead of floating point or text string) can use a CBOR tag number not assigned by IANA:
|
||||
// 1. Define a user-defined type in Go with just a time.Time or int64 as its data.
|
||||
// 2. Implement the cbor.Marshaler and cbor.Unmarshaler interface for that user-defined type
|
||||
// to encode or decode the tagged data item with an enclosed integer content.
|
||||
type TimeMode int
|
||||
|
||||
const (
|
||||
// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
|
||||
// TimeUnix causes time.Time to encode to a CBOR time (tag 1) with an integer content
|
||||
// representing seconds elapsed (with 1-second precision) since UNIX Epoch UTC.
|
||||
// The TimeUnix option is location independent and has a clear precision guarantee.
|
||||
TimeUnix TimeMode = iota
|
||||
|
||||
// TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision.
|
||||
// TimeUnixMicro causes time.Time to encode to a CBOR time (tag 1) with a floating point content
|
||||
// representing seconds elapsed (with up to 1-microsecond precision) since UNIX Epoch UTC.
|
||||
// NOTE: The floating point content is encoded to the shortest floating-point encoding that preserves
|
||||
// the 64-bit floating point value. I.e., the floating point encoding can be IEEE 754:
|
||||
// binary64, binary32, or binary16 depending on the content's value.
|
||||
TimeUnixMicro
|
||||
|
||||
// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
|
||||
// otherwise float-point rounded to microsecond precision.
|
||||
// TimeUnixDynamic causes time.Time to encode to a CBOR time (tag 1) with either an integer content or
|
||||
// a floating point content, depending on the content's value. This option is equivalent to dynamically
|
||||
// choosing TimeUnix if time.Time doesn't have fractional seconds, and using TimeUnixMicro if time.Time
|
||||
// has fractional seconds.
|
||||
TimeUnixDynamic
|
||||
|
||||
// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
|
||||
// TimeRFC3339 causes time.Time to encode to a CBOR time (tag 0) with a text string content
|
||||
// representing the time using 1-second precision in RFC3339 format. If the time.Time has a
|
||||
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
|
||||
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
|
||||
// - providing a time.Time value set to UTC, or
|
||||
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339.
|
||||
TimeRFC3339
|
||||
|
||||
// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
|
||||
// TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content
|
||||
// representing the time using 1-nanosecond precision in RFC3339 format. If the time.Time has a
|
||||
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
|
||||
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
|
||||
// - providing a time.Time value set to UTC, or
|
||||
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano.
|
||||
TimeRFC3339Nano
|
||||
|
||||
maxTimeMode
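For example, a mode that encodes time.Time as an RFC 3339 text string with tag 0 could be built roughly as follows (EncTagRequired is assumed here as the option that emits the tag):

    em, err := cbor.EncOptions{
        Time:    cbor.TimeRFC3339Nano,
        TimeTag: cbor.EncTagRequired, // emit tag 0 ahead of the text string
    }.EncMode()
    if err != nil {
        // handle invalid option values
    }
    b, err := em.Marshal(time.Now().UTC())
    _ = b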
|
||||
@@ -481,6 +524,24 @@ func (bmm BinaryMarshalerMode) valid() bool {
|
||||
return bmm >= 0 && bmm < maxBinaryMarshalerMode
|
||||
}
|
||||
|
||||
// TextMarshalerMode specifies how to encode types that implement encoding.TextMarshaler.
|
||||
type TextMarshalerMode int
|
||||
|
||||
const (
|
||||
// TextMarshalerNone does not recognize TextMarshaler implementations during encode.
|
||||
// This is the default behavior.
|
||||
TextMarshalerNone TextMarshalerMode = iota
|
||||
|
||||
// TextMarshalerTextString encodes the output of MarshalText to a CBOR text string.
|
||||
TextMarshalerTextString
|
||||
|
||||
maxTextMarshalerMode
|
||||
)
|
||||
|
||||
func (tmm TextMarshalerMode) valid() bool {
|
||||
return tmm >= 0 && tmm < maxTextMarshalerMode
|
||||
}
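Encode-side counterpart, sketched with a made-up type that implements encoding.TextMarshaler:

    type celsius float64

    func (c celsius) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("%.1f C", float64(c))), nil
    }

    em, _ := cbor.EncOptions{TextMarshaler: cbor.TextMarshalerTextString}.EncMode()
    b, _ := em.Marshal(celsius(21.5))
    // b holds the CBOR text string "21.5 C" instead of the default float encoding.
    _ = b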
|
||||
|
||||
// EncOptions specifies encoding options.
|
||||
type EncOptions struct {
|
||||
// Sort specifies sorting order.
|
||||
@@ -538,6 +599,14 @@ type EncOptions struct {
|
||||
|
||||
// BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
|
||||
BinaryMarshaler BinaryMarshalerMode
|
||||
|
||||
// TextMarshaler specifies how to encode types that implement encoding.TextMarshaler.
|
||||
TextMarshaler TextMarshalerMode
|
||||
|
||||
// JSONMarshalerTranscoder sets the transcoding scheme used to marshal types that implement
|
||||
// json.Marshaler but do not also implement cbor.Marshaler. If nil, encoding behavior is not
|
||||
// influenced by whether or not a type implements json.Marshaler.
|
||||
JSONMarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
|
||||
@@ -748,6 +817,9 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
|
||||
if !opts.BinaryMarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
|
||||
}
|
||||
if !opts.TextMarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid TextMarshaler " + strconv.Itoa(int(opts.TextMarshaler)))
|
||||
}
|
||||
em := encMode{
|
||||
sort: opts.Sort,
|
||||
shortestFloat: opts.ShortestFloat,
|
||||
@@ -767,13 +839,15 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
|
||||
byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
|
||||
byteArray: opts.ByteArray,
|
||||
binaryMarshaler: opts.BinaryMarshaler,
|
||||
textMarshaler: opts.TextMarshaler,
|
||||
jsonMarshalerTranscoder: opts.JSONMarshalerTranscoder,
|
||||
}
|
||||
return &em, nil
|
||||
}
|
||||
|
||||
// EncMode is the main interface for CBOR encoding.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Marshal(v any) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions
|
||||
}
|
||||
@@ -783,7 +857,7 @@ type EncMode interface {
|
||||
// into the built-in buffer pool.
|
||||
type UserBufferEncMode interface {
|
||||
EncMode
|
||||
MarshalToBuffer(v interface{}, buf *bytes.Buffer) error
|
||||
MarshalToBuffer(v any, buf *bytes.Buffer) error
|
||||
|
||||
// This private method is to prevent users implementing
|
||||
// this interface and so future additions to it will
|
||||
@@ -812,6 +886,8 @@ type encMode struct {
|
||||
byteSliceLaterEncodingTag uint64
|
||||
byteArray ByteArrayMode
|
||||
binaryMarshaler BinaryMarshalerMode
|
||||
textMarshaler TextMarshalerMode
|
||||
jsonMarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
var defaultEncMode, _ = EncOptions{}.encMode()
|
||||
@@ -888,22 +964,24 @@ func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode
|
||||
// EncOptions returns user specified options used to create this EncMode.
|
||||
func (em *encMode) EncOptions() EncOptions {
|
||||
return EncOptions{
|
||||
Sort: em.sort,
|
||||
ShortestFloat: em.shortestFloat,
|
||||
NaNConvert: em.nanConvert,
|
||||
InfConvert: em.infConvert,
|
||||
BigIntConvert: em.bigIntConvert,
|
||||
Time: em.time,
|
||||
TimeTag: em.timeTag,
|
||||
IndefLength: em.indefLength,
|
||||
NilContainers: em.nilContainers,
|
||||
TagsMd: em.tagsMd,
|
||||
OmitEmpty: em.omitEmpty,
|
||||
String: em.stringType,
|
||||
FieldName: em.fieldName,
|
||||
ByteSliceLaterFormat: em.byteSliceLaterFormat,
|
||||
ByteArray: em.byteArray,
|
||||
BinaryMarshaler: em.binaryMarshaler,
|
||||
Sort: em.sort,
|
||||
ShortestFloat: em.shortestFloat,
|
||||
NaNConvert: em.nanConvert,
|
||||
InfConvert: em.infConvert,
|
||||
BigIntConvert: em.bigIntConvert,
|
||||
Time: em.time,
|
||||
TimeTag: em.timeTag,
|
||||
IndefLength: em.indefLength,
|
||||
NilContainers: em.nilContainers,
|
||||
TagsMd: em.tagsMd,
|
||||
OmitEmpty: em.omitEmpty,
|
||||
String: em.stringType,
|
||||
FieldName: em.fieldName,
|
||||
ByteSliceLaterFormat: em.byteSliceLaterFormat,
|
||||
ByteArray: em.byteArray,
|
||||
BinaryMarshaler: em.binaryMarshaler,
|
||||
TextMarshaler: em.textMarshaler,
|
||||
JSONMarshalerTranscoder: em.jsonMarshalerTranscoder,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -921,7 +999,7 @@ func (em *encMode) encTagBytes(t reflect.Type) []byte {
|
||||
// Marshal returns the CBOR encoding of v using em encoding mode.
|
||||
//
|
||||
// See the documentation for Marshal for details.
|
||||
func (em *encMode) Marshal(v interface{}) ([]byte, error) {
|
||||
func (em *encMode) Marshal(v any) ([]byte, error) {
|
||||
e := getEncodeBuffer()
|
||||
|
||||
if err := encode(e, em, reflect.ValueOf(v)); err != nil {
|
||||
@@ -943,7 +1021,7 @@ func (em *encMode) Marshal(v interface{}) ([]byte, error) {
|
||||
// partially encoded data if error is returned.
|
||||
//
|
||||
// See Marshal for more details.
|
||||
func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
|
||||
func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
|
||||
if buf == nil {
|
||||
return fmt.Errorf("cbor: encoding buffer provided by user is nil")
|
||||
}
|
||||
@@ -957,7 +1035,7 @@ func (em *encMode) NewEncoder(w io.Writer) *Encoder {
|
||||
|
||||
// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
|
||||
var encodeBufferPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
e := new(bytes.Buffer)
|
||||
e.Grow(32) // TODO: make this configurable
|
||||
return e
|
||||
@@ -975,6 +1053,7 @@ func putEncodeBuffer(e *bytes.Buffer) {
|
||||
|
||||
type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
|
||||
type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
|
||||
type isZeroFunc func(v reflect.Value) (zero bool, err error)
|
||||
|
||||
func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if !v.IsValid() {
|
||||
@@ -983,7 +1062,7 @@ func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
vt := v.Type()
|
||||
f, _ := getEncodeFunc(vt)
|
||||
f, _, _ := getEncodeFunc(vt)
|
||||
if f == nil {
|
||||
return &UnsupportedTypeError{vt}
|
||||
}
|
||||
@@ -1483,6 +1562,15 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f.omitZero {
|
||||
zero, err := f.izf(fv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if zero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !f.keyAsInt && em.fieldName == FieldNameToByteString {
|
||||
e.Write(f.cborNameByteString)
|
||||
@@ -1665,6 +1753,107 @@ func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, e
|
||||
return len(data) == 0, nil
|
||||
}
|
||||
|
||||
type textMarshalerEncoder struct {
|
||||
alternateEncode encodeFunc
|
||||
alternateIsEmpty isEmptyFunc
|
||||
}
|
||||
|
||||
func (tme textMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.textMarshaler == TextMarshalerNone {
|
||||
return tme.alternateEncode(e, em, v)
|
||||
}
|
||||
|
||||
vt := v.Type()
|
||||
m, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(vt)
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(encoding.TextMarshaler)
|
||||
}
|
||||
data, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cbor: cannot marshal text for %s: %w", vt, err)
|
||||
}
|
||||
if b := em.encTagBytes(vt); b != nil {
|
||||
e.Write(b)
|
||||
}
|
||||
|
||||
encodeHead(e, byte(cborTypeTextString), uint64(len(data)))
|
||||
e.Write(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tme textMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
|
||||
if em.textMarshaler == TextMarshalerNone {
|
||||
return tme.alternateIsEmpty(em, v)
|
||||
}
|
||||
|
||||
m, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(v.Type())
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(encoding.TextMarshaler)
|
||||
}
|
||||
data, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cbor: cannot marshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return len(data) == 0, nil
|
||||
}
|
||||
|
||||
type jsonMarshalerEncoder struct {
|
||||
alternateEncode encodeFunc
|
||||
alternateIsEmpty isEmptyFunc
|
||||
}
|
||||
|
||||
func (jme jsonMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.jsonMarshalerTranscoder == nil {
|
||||
return jme.alternateEncode(e, em, v)
|
||||
}
|
||||
|
||||
vt := v.Type()
|
||||
m, ok := v.Interface().(jsonMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(vt)
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(jsonMarshaler)
|
||||
}
|
||||
|
||||
json, err := m.MarshalJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
offset := e.Len()
|
||||
|
||||
if b := em.encTagBytes(vt); b != nil {
|
||||
e.Write(b)
|
||||
}
|
||||
|
||||
if err := em.jsonMarshalerTranscoder.Transcode(e, bytes.NewReader(json)); err != nil {
|
||||
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
|
||||
}
|
||||
|
||||
// Validate that the transcode function has written exactly one well-formed data item.
|
||||
d := decoder{data: e.Bytes()[offset:], dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
|
||||
if err := d.wellformed(false, true); err != nil {
|
||||
e.Truncate(offset)
|
||||
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jme jsonMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
|
||||
if em.jsonMarshalerTranscoder == nil {
|
||||
return jme.alternateIsEmpty(em, v)
|
||||
}
|
||||
|
||||
// As with types implementing cbor.Marshaler, transcoded json.Marshaler values always encode
|
||||
// as exactly one complete CBOR data item.
|
||||
return false, nil
|
||||
}
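Encode-side wiring mirrors the decode option (jsonTranscoder again stands for any Transcoder value):

    em, err := cbor.EncOptions{JSONMarshalerTranscoder: jsonTranscoder}.EncMode()
    if err != nil {
        // handle invalid options
    }
    // With this mode, types that implement json.Marshaler but not cbor.Marshaler are
    // encoded by calling MarshalJSON and transcoding the result to one CBOR data item.
    _ = em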
|
||||
|
||||
func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
|
||||
return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
|
||||
@@ -1768,41 +1957,45 @@ func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
|
||||
return headSize
|
||||
}
|
||||
|
||||
type jsonMarshaler interface{ MarshalJSON() ([]byte, error) }
|
||||
|
||||
var (
|
||||
typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
|
||||
typeTextMarshaler = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
typeJSONMarshaler = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
|
||||
typeRawMessage = reflect.TypeOf(RawMessage(nil))
|
||||
typeByteString = reflect.TypeOf(ByteString(""))
|
||||
)
|
||||
|
||||
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
||||
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
|
||||
k := t.Kind()
|
||||
if k == reflect.Ptr {
|
||||
return getEncodeIndirectValueFunc(t), isEmptyPtr
|
||||
if k == reflect.Pointer {
|
||||
return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
|
||||
}
|
||||
switch t {
|
||||
case typeSimpleValue:
|
||||
return encodeMarshalerType, isEmptyUint
|
||||
return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
|
||||
|
||||
case typeTag:
|
||||
return encodeTag, alwaysNotEmpty
|
||||
return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeTime:
|
||||
return encodeTime, alwaysNotEmpty
|
||||
return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeBigInt:
|
||||
return encodeBigInt, alwaysNotEmpty
|
||||
return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeRawMessage:
|
||||
return encodeMarshalerType, isEmptySlice
|
||||
return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
|
||||
|
||||
case typeByteString:
|
||||
return encodeMarshalerType, isEmptyString
|
||||
return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
|
||||
}
|
||||
if reflect.PtrTo(t).Implements(typeMarshaler) {
|
||||
return encodeMarshalerType, alwaysNotEmpty
|
||||
if reflect.PointerTo(t).Implements(typeMarshaler) {
|
||||
return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
}
|
||||
if reflect.PtrTo(t).Implements(typeBinaryMarshaler) {
|
||||
if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that disable BinaryMarshaler
|
||||
bme := binaryMarshalerEncoder{
|
||||
@@ -1813,41 +2006,65 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
||||
ief = bme.isEmpty
|
||||
}()
|
||||
}
|
||||
if reflect.PointerTo(t).Implements(typeTextMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that disable TextMarshaler
|
||||
tme := textMarshalerEncoder{
|
||||
alternateEncode: ef,
|
||||
alternateIsEmpty: ief,
|
||||
}
|
||||
ef = tme.encode
|
||||
ief = tme.isEmpty
|
||||
}()
|
||||
}
|
||||
if reflect.PointerTo(t).Implements(typeJSONMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that don't support transcoding
|
||||
// from types that implement json.Marshaler.
|
||||
jme := jsonMarshalerEncoder{
|
||||
alternateEncode: ef,
|
||||
alternateIsEmpty: ief,
|
||||
}
|
||||
ef = jme.encode
|
||||
ief = jme.isEmpty
|
||||
}()
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Bool:
|
||||
return encodeBool, isEmptyBool
|
||||
return encodeBool, isEmptyBool, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return encodeInt, isEmptyInt
|
||||
return encodeInt, isEmptyInt, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return encodeUint, isEmptyUint
|
||||
return encodeUint, isEmptyUint, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return encodeFloat, isEmptyFloat
|
||||
return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
|
||||
|
||||
case reflect.String:
|
||||
return encodeString, isEmptyString
|
||||
return encodeString, isEmptyString, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return encodeByteString, isEmptySlice
|
||||
return encodeByteString, isEmptySlice, getIsZeroFunc(t)
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f, _ := getEncodeFunc(t.Elem())
|
||||
f, _, _ := getEncodeFunc(t.Elem())
|
||||
if f == nil {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
return arrayEncodeFunc{f: f}.encode, isEmptySlice
|
||||
return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Map:
|
||||
f := getEncodeMapFunc(t)
|
||||
if f == nil {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
return f, isEmptyMap
|
||||
return f, isEmptyMap, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Struct:
|
||||
// Get struct's special field "_" tag options
|
||||
@@ -1855,31 +2072,31 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag != "-" {
|
||||
if hasToArrayOption(tag) {
|
||||
return encodeStructToArray, isEmptyStruct
|
||||
return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
|
||||
}
|
||||
}
|
||||
}
|
||||
return encodeStruct, isEmptyStruct
|
||||
return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Interface:
|
||||
return encodeIntf, isEmptyIntf
|
||||
return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
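The marshaler handling in getEncodeFuncInternal leans on a small Go pattern: deferred closures capture the named return values (ef, ief) after the type switch has filled them in, wrapping each one with a fallback-aware encoder. A minimal, self-contained illustration of the same pattern outside this package (names are invented for the example):

package main

import (
    "fmt"
    "strings"
)

// wrapWithLogging decorates its named return value via a deferred closure,
// mirroring how the binary/text/JSON marshaler encoders are layered over the
// base encode function chosen later in the function body.
func wrapWithLogging(verbose bool) (handler func(string) string) {
    if verbose {
        defer func() {
            inner := handler // whatever the function body ends up assigning
            handler = func(s string) string {
                fmt.Println("handler input:", s)
                return inner(s)
            }
        }()
    }
    handler = strings.ToUpper // base behavior, chosen after the defer is registered
    return handler
}

func main() {
    fmt.Println(wrapWithLogging(true)("cbor")) // prints the log line, then "CBOR"
}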
|
||||
|
||||
func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
|
||||
for t.Kind() == reflect.Ptr {
|
||||
for t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
f, _ := getEncodeFunc(t)
|
||||
f, _, _ := getEncodeFunc(t)
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
for v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||
for v.Kind() == reflect.Pointer && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
if v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
e.Write(cborNil)
|
||||
return nil
|
||||
}
|
||||
@@ -1987,3 +2204,96 @@ func float32NaNFromReflectValue(v reflect.Value) float32 {
|
||||
f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
|
||||
return f32
|
||||
}
|
||||
|
||||
type isZeroer interface {
    IsZero() bool
}

var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()

// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
// Types that implement `IsZero() bool` are delegated to for non-nil values.
// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
// The returned function matches behavior of stdlib encoding/json behavior in Go 1.24+.
func getIsZeroFunc(t reflect.Type) isZeroFunc {
    // Provide a function that uses a type's IsZero method if defined.
    switch {
    case t == nil:
        return isZeroDefault
    case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
        return isZeroInterfaceCustom
    case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
        return isZeroPointerCustom
    case t.Implements(isZeroerType):
        return isZeroCustom
    case reflect.PointerTo(t).Implements(isZeroerType):
        return isZeroAddrCustom
    default:
        return isZeroDefault
    }
}

// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
// and delegates to the custom IsZero() implementation otherwise.
func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
    kind := v.Kind()

    switch kind {
    case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
        if v.IsNil() {
            return true, nil
        }
    }

    switch kind {
    case reflect.Interface, reflect.Pointer:
        if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
            return true, nil
        }
    }

    return v.Interface().(isZeroer).IsZero(), nil
}

// isZeroPointerCustom returns true for nil values,
// and delegates to the custom IsZero() implementation otherwise.
func isZeroPointerCustom(v reflect.Value) (bool, error) {
    if v.IsNil() {
        return true, nil
    }
    return v.Interface().(isZeroer).IsZero(), nil
}

// isZeroCustom delegates to the custom IsZero() implementation.
func isZeroCustom(v reflect.Value) (bool, error) {
    return v.Interface().(isZeroer).IsZero(), nil
}

// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
func isZeroAddrCustom(v reflect.Value) (bool, error) {
    if !v.CanAddr() {
        // Temporarily box v so we can take the address.
        v2 := reflect.New(v.Type()).Elem()
        v2.Set(v)
        v = v2
    }
    return v.Addr().Interface().(isZeroer).IsZero(), nil
}

// isZeroDefault calls reflect.Value#IsZero()
func isZeroDefault(v reflect.Value) (bool, error) {
    if !v.IsValid() {
        // v is zero value
        return true, nil
    }
    return v.IsZero(), nil
}

// isZeroFieldStruct is used to determine whether to omit toarray structs
func isZeroFieldStruct(v reflect.Value) (bool, error) {
    structType, err := getEncodingStructType(v.Type())
    if err != nil {
        return false, err
    }
    return len(structType.fields) == 0, nil
}
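The delegation order above matters because a type's own IsZero can disagree with reflect's structural notion of zero. A small illustration (the type is hypothetical, not from the diff):

package main

import (
    "fmt"
    "reflect"
)

// Threshold treats every non-positive value as zero.
type Threshold struct{ v int }

func (t Threshold) IsZero() bool { return t.v <= 0 }

func main() {
    x := Threshold{v: -1}
    fmt.Println(reflect.ValueOf(x).IsZero()) // false: the struct field is set
    fmt.Println(x.IsZero())                  // true: the type's own definition of zero
    // getIsZeroFunc prefers the method, so an omitzero-tagged field holding this value would be dropped.
}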
|
||||
|
||||
10
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
@@ -1,8 +1,6 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
@@ -67,8 +65,8 @@ func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v
|
||||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
kf, _, _ := getEncodeFunc(t.Key())
|
||||
ef, _, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -76,13 +74,13 @@ func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf: kf,
|
||||
ef: ef,
|
||||
kpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
rk := reflect.New(t.Key()).Elem()
|
||||
return &rk
|
||||
},
|
||||
},
|
||||
vpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
rv := reflect.New(t.Elem()).Elem()
|
||||
return &rv
|
||||
},
|
||||
|
||||
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
@@ -1,60 +0,0 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build !go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type mapKeyValueEncodeFunc struct {
|
||||
kf, ef encodeFunc
|
||||
}
|
||||
|
||||
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
|
||||
if kvs == nil {
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
initial := e.Len()
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
offset := e.Len()
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
valueOffset := e.Len()
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
kvs[i] = keyValue{
|
||||
offset: offset - initial,
|
||||
valueOffset: valueOffset - initial,
|
||||
nextOffset: e.Len() - initial,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
|
||||
return mapEncodeFunc{
|
||||
e: mkv.encodeKeyValues,
|
||||
}.encode
|
||||
}
|
||||
8
vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build go1.24

package cbor

var jsonStdlibSupportsOmitzero = true
8
vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build !go1.24

package cbor

var jsonStdlibSupportsOmitzero = false
|
||||
29
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
29
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
@@ -45,6 +48,9 @@ func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
|
||||
}
|
||||
|
||||
// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
    if sv == nil {
        return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
@@ -52,6 +58,29 @@ func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {

    d := decoder{data: data, dm: defaultDecMode}

    // Check well-formedness of CBOR data item.
    // SimpleValue.UnmarshalCBOR() is exported, so
    // the codec needs to support same behavior for:
    //   - Unmarshal(data, *SimpleValue)
    //   - SimpleValue.UnmarshalCBOR(data)
    err := d.wellformed(false, false)
    if err != nil {
        return err
    }

    return sv.unmarshalCBOR(data)
}
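A hedged usage sketch for SimpleValue with the vendored API; the byte values follow RFC 8949, and the round-trip behavior shown in the comments is assumed rather than taken from this diff:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    // Simple values 0..23 encode into a single byte: 0xe0 + n.
    b, err := cbor.Marshal(cbor.SimpleValue(16))
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", b) // expected: f0

    var sv cbor.SimpleValue
    if err := cbor.Unmarshal(b, &sv); err != nil {
        panic(err)
    }
    fmt.Println(sv) // expected: 16
}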
|
||||
|
||||
// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
|
||||
// This function assumes data is well-formed, and does not perform bounds checking.
|
||||
// This function is called by Unmarshal().
|
||||
func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
|
||||
if sv == nil {
|
||||
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
typ, ai, val := d.getHead()
|
||||
|
||||
if typ != cborTypePrimitives {
|
||||
|
||||
4
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
4
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
@@ -26,7 +26,7 @@ func NewDecoder(r io.Reader) *Decoder {
|
||||
}
|
||||
|
||||
// Decode reads CBOR value and decodes it into the value pointed to by v.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
func (dec *Decoder) Decode(v any) error {
|
||||
_, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
@@ -170,7 +170,7 @@ func NewEncoder(w io.Writer) *Encoder {
|
||||
}
|
||||
|
||||
// Encode writes the CBOR encoding of v.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
if len(enc.indefTypes) > 0 && v != nil {
|
||||
indefType := enc.indefTypes[len(enc.indefTypes)-1]
|
||||
if indefType == cborTypeTextString {
|
||||
|
||||
18
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
18
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
@@ -18,9 +18,11 @@ type field struct {
|
||||
typ reflect.Type
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
izf isZeroFunc
|
||||
typInfo *typeInfo // used by decoder to reuse type info
|
||||
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
|
||||
omitEmpty bool // used to skip empty field
|
||||
omitZero bool // used to skip zero field
|
||||
keyAsInt bool // used to encode/decode field name as int
|
||||
}
|
||||
|
||||
@@ -157,7 +159,7 @@ func appendFields(
|
||||
f := t.Field(i)
|
||||
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
for ft.Kind() == reflect.Pointer {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
@@ -165,9 +167,11 @@ func appendFields(
|
||||
continue
|
||||
}
|
||||
|
||||
cborTag := true
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag == "" {
|
||||
tag = f.Tag.Get("json")
|
||||
cborTag = false
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
@@ -177,7 +181,7 @@ func appendFields(
|
||||
|
||||
// Parse field tag options
|
||||
var tagFieldName string
|
||||
var omitempty, keyasint bool
|
||||
var omitempty, omitzero, keyasint bool
|
||||
for j := 0; tag != ""; j++ {
|
||||
var token string
|
||||
idx := strings.IndexByte(tag, ',')
|
||||
@@ -192,6 +196,10 @@ func appendFields(
|
||||
switch token {
|
||||
case "omitempty":
|
||||
omitempty = true
|
||||
case "omitzero":
|
||||
if cborTag || jsonStdlibSupportsOmitzero {
|
||||
omitzero = true
|
||||
}
|
||||
case "keyasint":
|
||||
keyasint = true
|
||||
}
|
||||
@@ -213,6 +221,7 @@ func appendFields(
|
||||
idx: fIdx,
|
||||
typ: f.Type,
|
||||
omitEmpty: omitempty,
|
||||
omitZero: omitzero,
|
||||
keyAsInt: keyasint,
|
||||
tagged: tagged})
|
||||
} else {
|
||||
@@ -230,8 +239,7 @@ func appendFields(
|
||||
// a nonexportable anonymous field of struct type.
|
||||
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||
exportable := f.PkgPath == ""
|
||||
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||
return f.IsExported() || (f.Anonymous && fk == reflect.Struct)
|
||||
}
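Taken together with the isZero helpers in encode.go, the new omitZero field option means a cbor struct tag can now request json-style omitzero semantics; in json tags it is only honored when built with Go 1.24+ (the jsonStdlibSupportsOmitzero gate above). A usage sketch, assuming the default encode mode applies the option:

package main

import (
    "fmt"
    "time"

    "github.com/fxamacker/cbor/v2"
)

type Event struct {
    Name     string    `cbor:"name"`
    Deadline time.Time `cbor:"deadline,omitzero"` // time.Time.IsZero decides omission
}

func main() {
    b, err := cbor.Marshal(Event{Name: "sync"}) // Deadline is the zero time
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", b) // expected: a one-entry map containing only "name"
}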
|
||||
|
||||
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||
@@ -244,7 +252,7 @@ func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv r
|
||||
fv = fv.Field(n)
|
||||
|
||||
if i < len(idx)-1 {
|
||||
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.IsNil() {
|
||||
// Null pointer to embedded struct field
|
||||
fv, err = f(fv)
|
||||
|
||||
48
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
48
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
@@ -7,27 +10,54 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||
// enclosed data item if it were to appear outside of a tag.
|
||||
// Tag represents a tagged data item (CBOR major type 6), comprising a tag number and the unmarshaled tag content.
|
||||
// NOTE: The same encoding and decoding options that apply to untagged CBOR data items also apply to tag content
|
||||
// during encoding and decoding.
|
||||
type Tag struct {
|
||||
Number uint64
|
||||
Content interface{}
|
||||
Content any
|
||||
}
|
||||
|
||||
// RawTag represents CBOR tag data, including tag number and raw tag content.
// RawTag implements Unmarshaler and Marshaler interfaces.
// RawTag represents a tagged data item (CBOR major type 6), comprising a tag number and the raw tag content.
// The raw tag content (enclosed data item) is a CBOR-encoded data item.
// RawTag can be used to delay decoding a CBOR data item or precompute encoding a CBOR data item.
type RawTag struct {
    Number  uint64
    Content RawMessage
}

// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
// UnmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
//
// Deprecated: No longer used by this codec; kept for compatibility
// with user apps that directly call this function.
func (t *RawTag) UnmarshalCBOR(data []byte) error {
    if t == nil {
        return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
    }

    d := decoder{data: data, dm: defaultDecMode}

    // Check if data is a well-formed CBOR data item.
    // RawTag.UnmarshalCBOR() is exported, so
    // the codec needs to support same behavior for:
    //   - Unmarshal(data, *RawTag)
    //   - RawTag.UnmarshalCBOR(data)
    err := d.wellformed(false, false)
    if err != nil {
        return err
    }

    return t.unmarshalCBOR(data)
}
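A usage sketch for the delayed-decoding role described in the RawTag comment; the byte layout (tag 42 wrapping the integer 1) follows RFC 8949, and the default decode options are assumed to permit tags:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    data := []byte{0xd8, 0x2a, 0x01} // tag(42) { 1 }

    var rt cbor.RawTag
    if err := cbor.Unmarshal(data, &rt); err != nil {
        panic(err)
    }
    fmt.Println(rt.Number) // 42

    // Decode the enclosed data item only when (and how) the caller wants to.
    var n int
    if err := cbor.Unmarshal(rt.Content, &n); err != nil {
        panic(err)
    }
    fmt.Println(n) // 1
}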
|
||||
|
||||
// unmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
|
||||
// This function assumes data is well-formed, and does not perform bounds checking.
|
||||
// This function is called by Unmarshal().
|
||||
func (t *RawTag) unmarshalCBOR(data []byte) error {
|
||||
if t == nil {
|
||||
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
return nil
|
||||
@@ -193,7 +223,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
|
||||
if contentType == nil {
|
||||
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||
}
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
for contentType.Kind() == reflect.Pointer {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||
@@ -216,7 +246,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
|
||||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
for contentType.Kind() == reflect.Pointer {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
t.Lock()
|
||||
|
||||
82
vendor/github.com/go-logfmt/logfmt/README.md
generated
vendored
82
vendor/github.com/go-logfmt/logfmt/README.md
generated
vendored
@@ -1,41 +1,41 @@
|
||||
# logfmt
|
||||
|
||||
[](https://pkg.go.dev/github.com/go-logfmt/logfmt)
|
||||
[](https://goreportcard.com/report/go-logfmt/logfmt)
|
||||
[](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml)
|
||||
[](https://coveralls.io/github/go-logfmt/logfmt?branch=main)
|
||||
|
||||
Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
|
||||
format][fmt]. It provides an API similar to [encoding/json][json] and
|
||||
[encoding/xml][xml].
|
||||
|
||||
[fmt]: https://brandur.org/logfmt
|
||||
[json]: https://pkg.go.dev/encoding/json
|
||||
[xml]: https://pkg.go.dev/encoding/xml
|
||||
|
||||
The logfmt format was first documented by Brandur Leach in [this
|
||||
article][origin]. The format has not been formally standardized. The most
|
||||
authoritative public specification to date has been the documentation of a Go
|
||||
Language [package][parser] written by Blake Mizerany and Keith Rarick.
|
||||
|
||||
[origin]: https://brandur.org/logfmt
|
||||
[parser]: https://pkg.go.dev/github.com/kr/logfmt
|
||||
|
||||
## Goals
|
||||
|
||||
This project attempts to conform as closely as possible to the prior art, while
|
||||
also removing ambiguity where necessary to provide well behaved encoder and
|
||||
decoder implementations.
|
||||
|
||||
## Non-goals
|
||||
|
||||
This project does not attempt to formally standardize the logfmt format. In the
|
||||
event that logfmt is standardized this project would take conforming to the
|
||||
standard as a goal.
|
||||
|
||||
## Versioning
|
||||
|
||||
This project publishes releases according to the Go language guidelines for
|
||||
[developing and publishing modules][pub].
|
||||
|
||||
[pub]: https://go.dev/doc/modules/developing
|
||||
13
vendor/github.com/google/cel-go/cel/BUILD.bazel
generated
vendored
13
vendor/github.com/google/cel-go/cel/BUILD.bazel
generated
vendored
@@ -11,15 +11,17 @@ go_library(
|
||||
"decls.go",
|
||||
"env.go",
|
||||
"folding.go",
|
||||
"io.go",
|
||||
"inlining.go",
|
||||
"io.go",
|
||||
"library.go",
|
||||
"macro.go",
|
||||
"optimizer.go",
|
||||
"options.go",
|
||||
"program.go",
|
||||
"prompt.go",
|
||||
"validator.go",
|
||||
],
|
||||
embedsrcs = ["//cel/templates"],
|
||||
importpath = "github.com/google/cel-go/cel",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
@@ -29,6 +31,7 @@ go_library(
|
||||
"//common/ast:go_default_library",
|
||||
"//common/containers:go_default_library",
|
||||
"//common/decls:go_default_library",
|
||||
"//common/env:go_default_library",
|
||||
"//common/functions:go_default_library",
|
||||
"//common/operators:go_default_library",
|
||||
"//common/overloads:go_default_library",
|
||||
@@ -61,9 +64,10 @@ go_test(
|
||||
"decls_test.go",
|
||||
"env_test.go",
|
||||
"folding_test.go",
|
||||
"io_test.go",
|
||||
"inlining_test.go",
|
||||
"io_test.go",
|
||||
"optimizer_test.go",
|
||||
"prompt_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
data = [
|
||||
@@ -72,6 +76,9 @@ go_test(
|
||||
embed = [
|
||||
":go_default_library",
|
||||
],
|
||||
embedsrcs = [
|
||||
"//cel/testdata:prompts",
|
||||
],
|
||||
deps = [
|
||||
"//common/operators:go_default_library",
|
||||
"//common/overloads:go_default_library",
|
||||
@@ -83,8 +90,8 @@ go_test(
|
||||
"//test/proto2pb:go_default_library",
|
||||
"//test/proto3pb:go_default_library",
|
||||
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//encoding/prototext:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
|
||||
],
|
||||
|
||||
70
vendor/github.com/google/cel-go/cel/decls.go
generated
vendored
70
vendor/github.com/google/cel-go/cel/decls.go
generated
vendored
@@ -142,8 +142,23 @@ func Constant(name string, t *Type, v ref.Val) EnvOption {
|
||||
|
||||
// Variable creates an instance of a variable declaration with a variable name and type.
|
||||
func Variable(name string, t *Type) EnvOption {
|
||||
return VariableWithDoc(name, t, "")
|
||||
}
|
||||
|
||||
// VariableWithDoc creates an instance of a variable declaration with a variable name, type, and doc string.
|
||||
func VariableWithDoc(name string, t *Type, doc string) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
e.variables = append(e.variables, decls.NewVariable(name, t))
|
||||
e.variables = append(e.variables, decls.NewVariableWithDoc(name, t, doc))
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
|
||||
// VariableDecls configures a set of fully defined cel.VariableDecl instances in the environment.
|
||||
func VariableDecls(vars ...*decls.VariableDecl) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
for _, v := range vars {
|
||||
e.variables = append(e.variables, v)
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
@@ -183,13 +198,38 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if existing, found := e.functions[fn.Name()]; found {
|
||||
fn, err = existing.Merge(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return FunctionDecls(fn)(e)
|
||||
}
|
||||
}
|
||||
|
||||
// OverloadSelector selects an overload associated with a given function when it returns true.
|
||||
//
|
||||
// Used in combination with the FunctionDecl.Subset method.
|
||||
type OverloadSelector = decls.OverloadSelector
|
||||
|
||||
// IncludeOverloads defines an OverloadSelector which allow-lists a set of overloads by their ids.
|
||||
func IncludeOverloads(overloadIDs ...string) OverloadSelector {
|
||||
return decls.IncludeOverloads(overloadIDs...)
|
||||
}
|
||||
|
||||
// ExcludeOverloads defines an OverloadSelector which deny-lists a set of overloads by their ids.
|
||||
func ExcludeOverloads(overloadIDs ...string) OverloadSelector {
|
||||
return decls.ExcludeOverloads(overloadIDs...)
|
||||
}
|
||||
|
||||
// FunctionDecls provides one or more fully formed function declarations to be added to the environment.
|
||||
func FunctionDecls(funcs ...*decls.FunctionDecl) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
var err error
|
||||
for _, fn := range funcs {
|
||||
if existing, found := e.functions[fn.Name()]; found {
|
||||
fn, err = existing.Merge(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
e.functions[fn.Name()] = fn
|
||||
}
|
||||
e.functions[fn.Name()] = fn
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
@@ -197,6 +237,13 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
|
||||
// FunctionOpt defines a functional option for configuring a function declaration.
|
||||
type FunctionOpt = decls.FunctionOpt
|
||||
|
||||
// FunctionDocs provides a general usage documentation for the function.
|
||||
//
|
||||
// Use OverloadExamples to provide example usage instructions for specific overloads.
|
||||
func FunctionDocs(docs ...string) FunctionOpt {
|
||||
return decls.FunctionDocs(docs...)
|
||||
}
|
||||
|
||||
// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
@@ -270,6 +317,11 @@ func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...O
|
||||
// OverloadOpt is a functional option for configuring a function overload.
|
||||
type OverloadOpt = decls.OverloadOpt
|
||||
|
||||
// OverloadExamples configures an example of how to invoke the overload.
|
||||
func OverloadExamples(docs ...string) OverloadOpt {
|
||||
return decls.OverloadExamples(docs...)
|
||||
}
|
||||
|
||||
// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
|
||||
// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
|
||||
func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
|
||||
@@ -288,6 +340,12 @@ func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
|
||||
return decls.FunctionBinding(binding)
|
||||
}
|
||||
|
||||
// LateFunctionBinding indicates that the function has a binding which is not known at compile time.
|
||||
// This is useful for functions which have side-effects or are not deterministically computable.
|
||||
func LateFunctionBinding() OverloadOpt {
|
||||
return decls.LateFunctionBinding()
|
||||
}
|
||||
|
||||
// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
|
||||
//
|
||||
// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
|
||||
|
||||
233
vendor/github.com/google/cel-go/cel/env.go
generated
vendored
233
vendor/github.com/google/cel-go/cel/env.go
generated
vendored
@@ -16,6 +16,8 @@ package cel
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/google/cel-go/checker"
|
||||
@@ -24,12 +26,15 @@ import (
|
||||
celast "github.com/google/cel-go/common/ast"
|
||||
"github.com/google/cel-go/common/containers"
|
||||
"github.com/google/cel-go/common/decls"
|
||||
"github.com/google/cel-go/common/env"
|
||||
"github.com/google/cel-go/common/stdlib"
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/interpreter"
|
||||
"github.com/google/cel-go/parser"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// Source interface representing a user-provided expression.
|
||||
@@ -127,12 +132,13 @@ type Env struct {
|
||||
Container *containers.Container
|
||||
variables []*decls.VariableDecl
|
||||
functions map[string]*decls.FunctionDecl
|
||||
macros []parser.Macro
|
||||
macros []Macro
|
||||
contextProto protoreflect.MessageDescriptor
|
||||
adapter types.Adapter
|
||||
provider types.Provider
|
||||
features map[int]bool
|
||||
appliedFeatures map[int]bool
|
||||
libraries map[string]bool
|
||||
libraries map[string]SingletonLibrary
|
||||
validators []ASTValidator
|
||||
costOptions []checker.CostOption
|
||||
|
||||
@@ -151,6 +157,134 @@ type Env struct {
|
||||
progOpts []ProgramOption
|
||||
}
|
||||
|
||||
// ToConfig produces a YAML-serializable env.Config object from the given environment.
|
||||
//
|
||||
// The serialized configuration value is intended to represent a baseline set of config
|
||||
// options which could be used as input to an EnvOption to configure the majority of the
|
||||
// environment from a file.
|
||||
//
|
||||
// Note: validators, features, flags, and safe-guard settings are not yet supported by
|
||||
// the serialize method. Since optimizers are a separate construct from the environment
|
||||
// and the standard expression components (parse, check, evaluate), they are also not
|
||||
// supported by the serialize method.
|
||||
func (e *Env) ToConfig(name string) (*env.Config, error) {
|
||||
conf := env.NewConfig(name)
|
||||
// Container settings
|
||||
if e.Container != containers.DefaultContainer {
|
||||
conf.SetContainer(e.Container.Name())
|
||||
}
|
||||
for _, typeName := range e.Container.AliasSet() {
|
||||
conf.AddImports(env.NewImport(typeName))
|
||||
}
|
||||
|
||||
libOverloads := map[string][]string{}
|
||||
for libName, lib := range e.libraries {
|
||||
// Track the options which have been configured by a library and
|
||||
// then diff the library version against the configured function
|
||||
// to detect incremental overloads or rewrites.
|
||||
libEnv, _ := NewCustomEnv()
|
||||
libEnv, _ = Lib(lib)(libEnv)
|
||||
for fnName, fnDecl := range libEnv.Functions() {
|
||||
if len(fnDecl.OverloadDecls()) == 0 {
|
||||
continue
|
||||
}
|
||||
overloads, exist := libOverloads[fnName]
|
||||
if !exist {
|
||||
overloads = make([]string, 0, len(fnDecl.OverloadDecls()))
|
||||
}
|
||||
for _, o := range fnDecl.OverloadDecls() {
|
||||
overloads = append(overloads, o.ID())
|
||||
}
|
||||
libOverloads[fnName] = overloads
|
||||
}
|
||||
subsetLib, canSubset := lib.(LibrarySubsetter)
|
||||
alias := ""
|
||||
if aliasLib, canAlias := lib.(LibraryAliaser); canAlias {
|
||||
alias = aliasLib.LibraryAlias()
|
||||
libName = alias
|
||||
}
|
||||
if libName == "stdlib" && canSubset {
|
||||
conf.SetStdLib(subsetLib.LibrarySubset())
|
||||
continue
|
||||
}
|
||||
version := uint32(math.MaxUint32)
|
||||
if versionLib, isVersioned := lib.(LibraryVersioner); isVersioned {
|
||||
version = versionLib.LibraryVersion()
|
||||
}
|
||||
conf.AddExtensions(env.NewExtension(libName, version))
|
||||
}
|
||||
|
||||
// If this is a custom environment without the standard env, mark the stdlib as disabled.
|
||||
if conf.StdLib == nil && !e.HasLibrary("cel.lib.std") {
|
||||
conf.SetStdLib(env.NewLibrarySubset().SetDisabled(true))
|
||||
}
|
||||
|
||||
// Serialize the variables
|
||||
vars := make([]*decls.VariableDecl, 0, len(e.Variables()))
|
||||
stdTypeVars := map[string]*decls.VariableDecl{}
|
||||
for _, v := range stdlib.Types() {
|
||||
stdTypeVars[v.Name()] = v
|
||||
}
|
||||
for _, v := range e.Variables() {
|
||||
if _, isStdType := stdTypeVars[v.Name()]; isStdType {
|
||||
continue
|
||||
}
|
||||
vars = append(vars, v)
|
||||
}
|
||||
if e.contextProto != nil {
|
||||
conf.SetContextVariable(env.NewContextVariable(string(e.contextProto.FullName())))
|
||||
skipVariables := map[string]bool{}
|
||||
fields := e.contextProto.Fields()
|
||||
for i := 0; i < fields.Len(); i++ {
|
||||
field := fields.Get(i)
|
||||
variable, err := fieldToVariable(field)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not serialize context field variable %q, reason: %w", field.FullName(), err)
|
||||
}
|
||||
skipVariables[variable.Name()] = true
|
||||
}
|
||||
for _, v := range vars {
|
||||
if _, found := skipVariables[v.Name()]; !found {
|
||||
conf.AddVariableDecls(v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
conf.AddVariableDecls(vars...)
|
||||
}
|
||||
|
||||
// Serialize functions which are distinct from the ones configured by libraries.
|
||||
for fnName, fnDecl := range e.Functions() {
|
||||
if excludedOverloads, found := libOverloads[fnName]; found {
|
||||
if newDecl := fnDecl.Subset(decls.ExcludeOverloads(excludedOverloads...)); newDecl != nil {
|
||||
conf.AddFunctionDecls(newDecl)
|
||||
}
|
||||
} else {
|
||||
conf.AddFunctionDecls(fnDecl)
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize validators
|
||||
for _, val := range e.Validators() {
|
||||
// Only add configurable validators to the env.Config as all others are
|
||||
// expected to be implicitly enabled via extension libraries.
|
||||
if confVal, ok := val.(ConfigurableASTValidator); ok {
|
||||
conf.AddValidators(confVal.ToConfig())
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize features
|
||||
for featID, enabled := range e.features {
|
||||
featName, found := featureNameByID(featID)
|
||||
if !found {
|
||||
// If the feature isn't named, it isn't intended to be publicly exposed
|
||||
continue
|
||||
}
|
||||
conf.AddFeatures(env.NewFeature(featName, enabled))
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
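A hedged sketch of calling the new ToConfig method from application code; how the resulting env.Config is then serialized (e.g. to YAML) is outside this diff, so only the conversion itself is shown, and the names are illustrative:

env, err := cel.NewEnv(
    cel.Variable("resource.labels", cel.MapType(cel.StringType, cel.StringType)),
)
if err != nil {
    panic(err)
}
conf, err := env.ToConfig("descheduler-policy-env") // name is illustrative
if err != nil {
    panic(err)
}
_ = conf // inspect or serialize the returned *env.Config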
|
||||
|
||||
// NewEnv creates a program environment configured with the standard library of CEL functions and
|
||||
// macros. The Env value returned can parse and check any CEL program which builds upon the core
|
||||
// features documented in the CEL specification.
|
||||
@@ -194,7 +328,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
|
||||
provider: registry,
|
||||
features: map[int]bool{},
|
||||
appliedFeatures: map[int]bool{},
|
||||
libraries: map[string]bool{},
|
||||
libraries: map[string]SingletonLibrary{},
|
||||
validators: []ASTValidator{},
|
||||
progOpts: []ProgramOption{},
|
||||
costOptions: []checker.CostOption{},
|
||||
@@ -362,7 +496,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
for k, v := range e.functions {
|
||||
funcsCopy[k] = v
|
||||
}
|
||||
libsCopy := make(map[string]bool, len(e.libraries))
|
||||
libsCopy := make(map[string]SingletonLibrary, len(e.libraries))
|
||||
for k, v := range e.libraries {
|
||||
libsCopy[k] = v
|
||||
}
|
||||
@@ -376,6 +510,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
variables: varsCopy,
|
||||
functions: funcsCopy,
|
||||
macros: macsCopy,
|
||||
contextProto: e.contextProto,
|
||||
progOpts: progOptsCopy,
|
||||
adapter: adapter,
|
||||
features: featuresCopy,
|
||||
@@ -399,8 +534,8 @@ func (e *Env) HasFeature(flag int) bool {
|
||||
|
||||
// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
|
||||
func (e *Env) HasLibrary(libName string) bool {
|
||||
configured, exists := e.libraries[libName]
|
||||
return exists && configured
|
||||
_, exists := e.libraries[libName]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Libraries returns a list of SingletonLibrary that have been configured in the environment.
|
||||
@@ -418,9 +553,27 @@ func (e *Env) HasFunction(functionName string) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// Functions returns map of Functions, keyed by function name, that have been configured in the environment.
|
||||
// Functions returns a shallow copy of the Functions, keyed by function name, that have been configured in the environment.
|
||||
func (e *Env) Functions() map[string]*decls.FunctionDecl {
|
||||
return e.functions
|
||||
shallowCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
|
||||
for nm, fn := range e.functions {
|
||||
shallowCopy[nm] = fn
|
||||
}
|
||||
return shallowCopy
|
||||
}
|
||||
|
||||
// Variables returns a shallow copy of the variables associated with the environment.
|
||||
func (e *Env) Variables() []*decls.VariableDecl {
|
||||
shallowCopy := make([]*decls.VariableDecl, len(e.variables))
|
||||
copy(shallowCopy, e.variables)
|
||||
return shallowCopy
|
||||
}
|
||||
|
||||
// Macros returns a shallow copy of macros associated with the environment.
|
||||
func (e *Env) Macros() []Macro {
|
||||
shallowCopy := make([]Macro, len(e.macros))
|
||||
copy(shallowCopy, e.macros)
|
||||
return shallowCopy
|
||||
}
|
||||
|
||||
// HasValidator returns whether a specific ASTValidator has been configured in the environment.
|
||||
@@ -433,6 +586,11 @@ func (e *Env) HasValidator(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Validators returns the set of ASTValidators configured on the environment.
|
||||
func (e *Env) Validators() []ASTValidator {
|
||||
return e.validators[:]
|
||||
}
|
||||
|
||||
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
|
||||
//
|
||||
// This form of Parse creates a Source value for the input `txt` and forwards to the
|
||||
@@ -502,31 +660,30 @@ func (e *Env) TypeProvider() ref.TypeProvider {
|
||||
return &interopLegacyTypeProvider{Provider: e.provider}
|
||||
}
|
||||
|
||||
// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
|
||||
// Env as unknown AttributePattern values.
|
||||
// UnknownVars returns a PartialActivation which marks all variables declared in the Env as
|
||||
// unknown AttributePattern values.
|
||||
//
|
||||
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
|
||||
// PartialAttributes option is provided as a ProgramOption.
|
||||
func (e *Env) UnknownVars() interpreter.PartialActivation {
|
||||
// Note, the UnknownVars will behave the same as an cel.NoVars() unless the PartialAttributes
|
||||
// option is provided as a ProgramOption.
|
||||
func (e *Env) UnknownVars() PartialActivation {
|
||||
act := interpreter.EmptyActivation()
|
||||
part, _ := PartialVars(act, e.computeUnknownVars(act)...)
|
||||
return part
|
||||
}
|
||||
|
||||
// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
|
||||
// PartialVars returns a PartialActivation where all variables not in the input variable
|
||||
// set, but which have been configured in the environment, are marked as unknown.
|
||||
//
|
||||
// The `vars` value may either be an interpreter.Activation or any valid input to the
|
||||
// interpreter.NewActivation call.
|
||||
// The `vars` value may either be an Activation or any valid input to the cel.NewActivation call.
|
||||
//
|
||||
// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
|
||||
// variables. For more advanced use cases of partial state where portions of an object graph, rather
|
||||
// than top-level variables, are missing the PartialVars() method may be a more suitable choice.
|
||||
//
|
||||
// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
|
||||
// PartialAttributes option is provided as a ProgramOption.
|
||||
func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
|
||||
act, err := interpreter.NewActivation(vars)
|
||||
// Note, the PartialVars will behave the same as cel.NoVars() unless the PartialAttributes
|
||||
// option is provided as a ProgramOption.
|
||||
func (e *Env) PartialVars(vars any) (PartialActivation, error) {
|
||||
act, err := NewActivation(vars)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -598,10 +755,15 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// If the default UTC timezone fix has been enabled, make sure the library is configured
|
||||
e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// If the default UTC timezone has been disabled, configure the legacy overloads
|
||||
if utcTime, isSet := e.features[featureDefaultUTCTimeZone]; isSet && !utcTime {
|
||||
if !e.appliedFeatures[featureDefaultUTCTimeZone] {
|
||||
e.appliedFeatures[featureDefaultUTCTimeZone] = true
|
||||
e, err = Lib(timeLegacyLibrary{})(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Configure the parser.
|
||||
@@ -685,30 +847,9 @@ func (e *Env) getCheckerOrError() (*checker.Env, error) {
|
||||
return e.chk, e.chkErr
|
||||
}
|
||||
|
||||
// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
|
||||
// the feature if it has not already been enabled.
|
||||
func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
|
||||
if !e.HasFeature(feature) {
|
||||
return e, nil
|
||||
}
|
||||
_, applied := e.appliedFeatures[feature]
|
||||
if applied {
|
||||
return e, nil
|
||||
}
|
||||
e, err := option(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// record that the feature has been applied since it will generate declarations
|
||||
// and functions which will be propagated on Extend() calls and which should only
|
||||
// be registered once.
|
||||
e.appliedFeatures[feature] = true
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// computeUnknownVars determines a set of missing variables based on the input activation and the
|
||||
// environment's configured declaration set.
|
||||
func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
|
||||
func (e *Env) computeUnknownVars(vars Activation) []*interpreter.AttributePattern {
|
||||
var unknownPatterns []*interpreter.AttributePattern
|
||||
for _, v := range e.variables {
|
||||
varName := v.Name()
|
||||
|
||||
60
vendor/github.com/google/cel-go/cel/folding.go
generated
vendored
60
vendor/github.com/google/cel-go/cel/folding.go
generated
vendored
@@ -38,6 +38,23 @@ func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
|
||||
}
|
||||
}
|
||||
|
||||
// FoldKnownValues adds an Activation which provides known values for the folding evaluator.
|
||||
//
|
||||
// Any values the activation provides will be used by the constant folder and turned into
|
||||
// literals in the AST.
|
||||
//
|
||||
// Defaults to the NoVars() Activation
|
||||
func FoldKnownValues(knownValues Activation) ConstantFoldingOption {
|
||||
return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
|
||||
if knownValues != nil {
|
||||
opt.knownValues = knownValues
|
||||
} else {
|
||||
opt.knownValues = NoVars()
|
||||
}
|
||||
return opt, nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar an aggregate
|
||||
// literal values within function calls and select statements with their evaluated result.
|
||||
func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
|
||||
@@ -56,6 +73,7 @@ func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, e
|
||||
|
||||
type constantFoldingOptimizer struct {
|
||||
maxFoldIterations int
|
||||
knownValues Activation
|
||||
}
|
||||
|
||||
// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
|
||||
@@ -68,7 +86,8 @@ func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST)
|
||||
// Walk the list of foldable expression and continue to fold until there are no more folds left.
|
||||
// All of the fold candidates returned by the constantExprMatcher should succeed unless there's
|
||||
// a logic bug with the selection of expressions.
|
||||
foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
|
||||
constantExprMatcherCapture := func(e ast.NavigableExpr) bool { return opt.constantExprMatcher(ctx, a, e) }
|
||||
foldableExprs := ast.MatchDescendants(root, constantExprMatcherCapture)
|
||||
foldCount := 0
|
||||
for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
|
||||
for _, fold := range foldableExprs {
|
||||
@@ -77,21 +96,27 @@ func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST)
|
||||
if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
|
||||
continue
|
||||
}
|
||||
// Late-bound function calls cannot be folded.
|
||||
if fold.Kind() == ast.CallKind && isLateBoundFunctionCall(ctx, a, fold) {
|
||||
continue
|
||||
}
|
||||
// Otherwise, assume all context is needed to evaluate the expression.
|
||||
err := tryFold(ctx, a, fold)
|
||||
if err != nil {
|
||||
err := opt.tryFold(ctx, a, fold)
|
||||
// Ignore errors for identifiers, since there is no guarantee that the environment
|
||||
// has a value for them.
|
||||
if err != nil && fold.Kind() != ast.IdentKind {
|
||||
ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
|
||||
return a
|
||||
}
|
||||
}
|
||||
foldCount++
|
||||
foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
|
||||
foldableExprs = ast.MatchDescendants(root, constantExprMatcherCapture)
|
||||
}
|
||||
// Once all of the constants have been folded, try to run through the remaining comprehensions
|
||||
// one last time. In this case, there's no guarantee they'll run, so we only update the
|
||||
// target comprehension node with the literal value if the evaluation succeeds.
|
||||
for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
|
||||
tryFold(ctx, a, compre)
|
||||
opt.tryFold(ctx, a, compre)
|
||||
}
|
||||
|
||||
// If the output is a list, map, or struct which contains optional entries, then prune it
|
||||
@@ -121,7 +146,7 @@ func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST)
|
||||
//
|
||||
// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
|
||||
// the method will return an error.
|
||||
func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
|
||||
func (opt *constantFoldingOptimizer) tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
|
||||
// Assume all context is needed to evaluate the expression.
|
||||
subAST := &Ast{
|
||||
impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()),
|
||||
@@ -130,7 +155,11 @@ func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out, _, err := prg.Eval(NoVars())
|
||||
activation := opt.knownValues
|
||||
if activation == nil {
|
||||
activation = NoVars()
|
||||
}
|
||||
out, _, err := prg.Eval(activation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -139,6 +168,15 @@ func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func isLateBoundFunctionCall(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) bool {
|
||||
call := expr.AsCall()
|
||||
function := ctx.Functions()[call.FunctionName()]
|
||||
if function == nil {
|
||||
return false
|
||||
}
|
||||
return function.HasLateBinding()
|
||||
}
|
||||
|
||||
// maybePruneBranches inspects the non-strict call expression to determine whether
|
||||
// a branch can be removed. Evaluation will naturally prune logical and / or calls,
|
||||
// but conditional will not be pruned cleanly, so this is one small area where the
|
||||
@@ -455,13 +493,15 @@ func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) {
|
||||
// Only comprehensions which are not nested are included as possible constant folds, and only
|
||||
// if all variables referenced in the comprehension stack exist are only iteration or
|
||||
// accumulation variables.
|
||||
func constantExprMatcher(e ast.NavigableExpr) bool {
|
||||
func (opt *constantFoldingOptimizer) constantExprMatcher(ctx *OptimizerContext, a *ast.AST, e ast.NavigableExpr) bool {
|
||||
switch e.Kind() {
|
||||
case ast.CallKind:
|
||||
return constantCallMatcher(e)
|
||||
case ast.SelectKind:
|
||||
sel := e.AsSelect() // guaranteed to be a navigable value
|
||||
return constantMatcher(sel.Operand().(ast.NavigableExpr))
|
||||
case ast.IdentKind:
|
||||
return opt.knownValues != nil && a.ReferenceMap()[e.ID()] != nil
|
||||
case ast.ComprehensionKind:
|
||||
if isNestedComprehension(e) {
|
||||
return false
|
||||
@@ -477,6 +517,10 @@ func constantExprMatcher(e ast.NavigableExpr) bool {
|
||||
if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
|
||||
constantExprs = false
|
||||
}
|
||||
// Late-bound function calls cannot be folded.
|
||||
if e.Kind() == ast.CallKind && isLateBoundFunctionCall(ctx, a, e) {
|
||||
constantExprs = false
|
||||
}
|
||||
})
|
||||
ast.PreOrderVisit(e, visitor)
|
||||
return constantExprs
|
||||
|
||||
57
vendor/github.com/google/cel-go/cel/io.go
generated
vendored
57
vendor/github.com/google/cel-go/cel/io.go
generated
vendored
@@ -99,7 +99,13 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
|
||||
// Note, the conversion may not be an exact replica of the original expression, but will produce
|
||||
// a string that is semantically equivalent and whose textual representation is stable.
|
||||
func AstToString(a *Ast) (string, error) {
|
||||
return parser.Unparse(a.NativeRep().Expr(), a.NativeRep().SourceInfo())
|
||||
return ExprToString(a.NativeRep().Expr(), a.NativeRep().SourceInfo())
|
||||
}
|
||||
|
||||
// ExprToString converts an AST Expr node back to a string using macro call tracking metadata from
|
||||
// source info if any macros are encountered within the expression.
|
||||
func ExprToString(e ast.Expr, info *ast.SourceInfo) (string, error) {
|
||||
return parser.Unparse(e, info)
|
||||
}
|
||||
|
||||
// RefValueToValue converts between ref.Val and google.api.expr.v1alpha1.Value.
|
||||
@@ -120,6 +126,55 @@ func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
|
||||
return alpha, err
|
||||
}
|
||||
|
||||
// RefValToExprValue converts between ref.Val and google.api.expr.v1alpha1.ExprValue.
|
||||
// The result ExprValue is the serialized proto form.
|
||||
func RefValToExprValue(res ref.Val) (*exprpb.ExprValue, error) {
|
||||
return ExprValueAsAlphaProto(res)
|
||||
}
|
||||
|
||||
// ExprValueAsAlphaProto converts between ref.Val and google.api.expr.v1alpha1.ExprValue.
|
||||
// The result ExprValue is the serialized proto form.
|
||||
func ExprValueAsAlphaProto(res ref.Val) (*exprpb.ExprValue, error) {
|
||||
canonical, err := ExprValueAsProto(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
alpha := &exprpb.ExprValue{}
|
||||
err = convertProto(canonical, alpha)
|
||||
return alpha, err
|
||||
}
|
||||
|
||||
// ExprValueAsProto converts between ref.Val and cel.expr.ExprValue.
|
||||
// The result ExprValue is the serialized proto form.
|
||||
func ExprValueAsProto(res ref.Val) (*celpb.ExprValue, error) {
|
||||
switch res := res.(type) {
|
||||
case *types.Unknown:
|
||||
return &celpb.ExprValue{
|
||||
Kind: &celpb.ExprValue_Unknown{
|
||||
Unknown: &celpb.UnknownSet{
|
||||
Exprs: res.IDs(),
|
||||
},
|
||||
}}, nil
|
||||
case *types.Err:
|
||||
return &celpb.ExprValue{
|
||||
Kind: &celpb.ExprValue_Error{
|
||||
Error: &celpb.ErrorSet{
|
||||
// Keeping the error code as UNKNOWN since there's no error codes associated with
|
||||
// Cel-Go runtime errors.
|
||||
Errors: []*celpb.Status{{Code: 2, Message: res.Error()}},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
default:
|
||||
val, err := ValueAsProto(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &celpb.ExprValue{
|
||||
Kind: &celpb.ExprValue_Value{Value: val}}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ValueAsProto converts between ref.Val and cel.expr.Value.
|
||||
// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
|
||||
func ValueAsProto(res ref.Val) (*celpb.Value, error) {
|
||||
|
||||
409
vendor/github.com/google/cel-go/cel/library.go
generated
vendored
@@ -17,11 +17,11 @@ package cel
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/cel-go/common"
|
||||
"github.com/google/cel-go/common/ast"
|
||||
"github.com/google/cel-go/common/decls"
|
||||
"github.com/google/cel-go/common/env"
|
||||
"github.com/google/cel-go/common/operators"
|
||||
"github.com/google/cel-go/common/overloads"
|
||||
"github.com/google/cel-go/common/stdlib"
|
||||
@@ -71,6 +71,23 @@ type SingletonLibrary interface {
|
||||
LibraryName() string
|
||||
}
|
||||
|
||||
// LibraryAliaser generates a simple named alias for the library, for use during environment serialization.
|
||||
type LibraryAliaser interface {
|
||||
LibraryAlias() string
|
||||
}
|
||||
|
||||
// LibrarySubsetter provides the subset description associated with the library, or nil if the library is not subset.
|
||||
type LibrarySubsetter interface {
|
||||
LibrarySubset() *env.LibrarySubset
|
||||
}
|
||||
|
||||
// LibraryVersioner provides a version number for the library.
|
||||
//
|
||||
// If not implemented, the library version will be flagged as 'latest' during environment serialization.
|
||||
type LibraryVersioner interface {
|
||||
LibraryVersion() uint32
|
||||
}
|
||||
|
||||
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
|
||||
// and to be linked to each other.
|
||||
func Lib(l Library) EnvOption {
|
||||
@@ -80,7 +97,7 @@ func Lib(l Library) EnvOption {
|
||||
if e.HasLibrary(singleton.LibraryName()) {
|
||||
return e, nil
|
||||
}
|
||||
e.libraries[singleton.LibraryName()] = true
|
||||
e.libraries[singleton.LibraryName()] = singleton
|
||||
}
|
||||
var err error
|
||||
for _, opt := range l.CompileOptions() {
|
||||
@@ -94,26 +111,79 @@ func Lib(l Library) EnvOption {
|
||||
}
|
||||
}
|
||||
|
||||
// StdLibOption specifies a functional option for configuring the standard CEL library.
|
||||
type StdLibOption func(*stdLibrary) *stdLibrary
|
||||
|
||||
// StdLibSubset configures the standard library to use a subset of its functions and macros.
|
||||
//
|
||||
// Since the StdLib is a singleton library, only the first instance of the StdLib() environment option
// will be configured on the environment, which means only the StdLibSubset() initially configured with
// the library will be used.
|
||||
func StdLibSubset(subset *env.LibrarySubset) StdLibOption {
|
||||
return func(lib *stdLibrary) *stdLibrary {
|
||||
lib.subset = subset
|
||||
return lib
|
||||
}
|
||||
}
|
||||
|
||||
// StdLib returns an EnvOption for the standard library of CEL functions and macros.
|
||||
func StdLib() EnvOption {
|
||||
return Lib(stdLibrary{})
|
||||
func StdLib(opts ...StdLibOption) EnvOption {
|
||||
lib := &stdLibrary{}
|
||||
for _, o := range opts {
|
||||
lib = o(lib)
|
||||
}
|
||||
return Lib(lib)
|
||||
}
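A hedged usage sketch: because StdLib is a singleton library, a custom environment can opt in to it explicitly, and a subset may be supplied through StdLibSubset (fragment; assumes the cel and common/env packages are imported; the subset construction itself is left to the vendored common/env package):

// Full standard library in an otherwise empty environment.
e, err := cel.NewCustomEnv(cel.StdLib())
if err != nil {
	panic(err)
}
_ = e
// To restrict it, a *env.LibrarySubset value would be passed instead:
//   cel.NewCustomEnv(cel.StdLib(cel.StdLibSubset(subset)))
// where 'subset' is built from the vendored common/env package.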
|
||||
|
||||
// stdLibrary implements the Library interface and provides functional options for the core CEL
|
||||
// features documented in the specification.
|
||||
type stdLibrary struct{}
|
||||
type stdLibrary struct {
|
||||
subset *env.LibrarySubset
|
||||
}
|
||||
|
||||
// LibraryName implements the SingletonLibrary interface method.
|
||||
func (stdLibrary) LibraryName() string {
|
||||
func (*stdLibrary) LibraryName() string {
|
||||
return "cel.lib.std"
|
||||
}
|
||||
|
||||
// LibraryAlias returns the simple name of the library.
|
||||
func (*stdLibrary) LibraryAlias() string {
|
||||
return "stdlib"
|
||||
}
|
||||
|
||||
// LibrarySubset returns the env.LibrarySubset definition associated with the CEL Library.
|
||||
func (lib *stdLibrary) LibrarySubset() *env.LibrarySubset {
|
||||
return lib.subset
|
||||
}
|
||||
|
||||
// CompileOptions returns options for the standard CEL function declarations and macros.
|
||||
func (stdLibrary) CompileOptions() []EnvOption {
|
||||
func (lib *stdLibrary) CompileOptions() []EnvOption {
|
||||
funcs := stdlib.Functions()
|
||||
macros := StandardMacros
|
||||
if lib.subset != nil {
|
||||
subMacros := []Macro{}
|
||||
for _, m := range macros {
|
||||
if lib.subset.SubsetMacro(m.Function()) {
|
||||
subMacros = append(subMacros, m)
|
||||
}
|
||||
}
|
||||
macros = subMacros
|
||||
subFuncs := []*decls.FunctionDecl{}
|
||||
for _, fn := range funcs {
|
||||
if f, include := lib.subset.SubsetFunction(fn); include {
|
||||
subFuncs = append(subFuncs, f)
|
||||
}
|
||||
}
|
||||
funcs = subFuncs
|
||||
}
|
||||
return []EnvOption{
|
||||
func(e *Env) (*Env, error) {
|
||||
var err error
|
||||
for _, fn := range stdlib.Functions() {
|
||||
if err = lib.subset.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.variables = append(e.variables, stdlib.Types()...)
|
||||
for _, fn := range funcs {
|
||||
existing, found := e.functions[fn.Name()]
|
||||
if found {
|
||||
fn, err = existing.Merge(fn)
|
||||
@@ -125,16 +195,12 @@ func (stdLibrary) CompileOptions() []EnvOption {
|
||||
}
|
||||
return e, nil
|
||||
},
|
||||
func(e *Env) (*Env, error) {
|
||||
e.variables = append(e.variables, stdlib.Types()...)
|
||||
return e, nil
|
||||
},
|
||||
Macros(StandardMacros...),
|
||||
Macros(macros...),
|
||||
}
|
||||
}
|
||||
|
||||
// ProgramOptions returns function implementations for the standard CEL functions.
|
||||
func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
func (*stdLibrary) ProgramOptions() []ProgramOption {
|
||||
return []ProgramOption{}
|
||||
}
|
||||
|
||||
@@ -263,7 +329,7 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
// be expressed with `optMap`.
|
||||
//
|
||||
// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
|
||||
|
||||
//
|
||||
// # First
|
||||
//
|
||||
// Introduced in version: 2
|
||||
@@ -272,7 +338,7 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
// optional.None.
|
||||
//
|
||||
// [1, 2, 3].first().value() == 1
|
||||
|
||||
//
|
||||
// # Last
|
||||
//
|
||||
// Introduced in version: 2
|
||||
@@ -283,7 +349,7 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
// [1, 2, 3].last().value() == 3
|
||||
//
|
||||
// This is syntactic sugar for msg.elements[msg.elements.size()-1].
|
||||
|
||||
//
|
||||
// # Unwrap / UnwrapOpt
|
||||
//
|
||||
// Introduced in version: 2
|
||||
@@ -293,7 +359,6 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
//
|
||||
// optional.unwrap([optional.of(42), optional.none()]) == [42]
|
||||
// [optional.of(42), optional.none()].unwrapOpt() == [42]
|
||||
|
||||
func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
|
||||
lib := &optionalLib{version: math.MaxUint32}
|
||||
for _, opt := range opts {
|
||||
@@ -326,10 +391,20 @@ func OptionalTypesVersion(version uint32) OptionalTypesOption {
|
||||
}
|
||||
|
||||
// LibraryName implements the SingletonLibrary interface method.
|
||||
func (lib *optionalLib) LibraryName() string {
|
||||
func (*optionalLib) LibraryName() string {
|
||||
return "cel.lib.optional"
|
||||
}
|
||||
|
||||
// LibraryAlias returns the simple name of the library.
|
||||
func (*optionalLib) LibraryAlias() string {
|
||||
return "optional"
|
||||
}
|
||||
|
||||
// LibraryVersion returns the version of the library.
|
||||
func (lib *optionalLib) LibraryVersion() uint32 {
|
||||
return lib.version
|
||||
}
|
||||
|
||||
// CompileOptions implements the Library interface method.
|
||||
func (lib *optionalLib) CompileOptions() []EnvOption {
|
||||
paramTypeK := TypeParamType("K")
|
||||
@@ -347,16 +422,29 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
|
||||
Types(types.OptionalType),
|
||||
|
||||
// Configure the optMap and optFlatMap macros.
|
||||
Macros(ReceiverMacro(optMapMacro, 2, optMap)),
|
||||
Macros(ReceiverMacro(optMapMacro, 2, optMap,
|
||||
MacroDocs(`perform computation on the value if present and return the result as an optional`),
|
||||
MacroExamples(
|
||||
common.MultilineDescription(
|
||||
`// sub with the prefix 'dev.cel' or optional.none()`,
|
||||
`request.auth.tokens.?sub.optMap(id, 'dev.cel.' + id)`),
|
||||
`optional.none().optMap(i, i * 2) // optional.none()`))),
|
||||
|
||||
// Global and member functions for working with optional values.
|
||||
Function(optionalOfFunc,
|
||||
FunctionDocs(`create a new optional_type(T) with a value where any value is considered valid`),
|
||||
Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
|
||||
OverloadExamples(`optional.of(1) // optional(1)`),
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
return types.OptionalOf(value)
|
||||
}))),
|
||||
Function(optionalOfNonZeroValueFunc,
|
||||
FunctionDocs(`create a new optional_type(T) with a value, if the value is not a zero or empty value`),
|
||||
Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`optional.ofNonZeroValue(null) // optional.none()`,
|
||||
`optional.ofNonZeroValue("") // optional.none()`,
|
||||
`optional.ofNonZeroValue("hello") // optional.of('hello')`),
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
v, isZeroer := value.(traits.Zeroer)
|
||||
if !isZeroer || !v.IsZeroValue() {
|
||||
@@ -365,18 +453,26 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
|
||||
return types.OptionalNone
|
||||
}))),
|
||||
Function(optionalNoneFunc,
|
||||
FunctionDocs(`singleton value representing an optional without a value`),
|
||||
Overload("optional_none", []*Type{}, optionalTypeV,
|
||||
OverloadExamples(`optional.none()`),
|
||||
FunctionBinding(func(values ...ref.Val) ref.Val {
|
||||
return types.OptionalNone
|
||||
}))),
|
||||
Function(valueFunc,
|
||||
FunctionDocs(`obtain the value contained by the optional, error if optional.none()`),
|
||||
MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
|
||||
OverloadExamples(
|
||||
`optional.of(1).value() // 1`,
|
||||
`optional.none().value() // error`),
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
opt := value.(*types.Optional)
|
||||
return opt.GetValue()
|
||||
}))),
|
||||
Function(hasValueFunc,
|
||||
FunctionDocs(`determine whether the optional contains a value`),
|
||||
MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
|
||||
OverloadExamples(`optional.of({1: 2}).hasValue() // true`),
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
opt := value.(*types.Optional)
|
||||
return types.Bool(opt.HasValue())
|
||||
@@ -385,21 +481,43 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
|
||||
// Implementation of 'or' and 'orValue' are special-cased to support short-circuiting in the
|
||||
// evaluation chain.
|
||||
Function("or",
|
||||
MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
|
||||
FunctionDocs(`chain optional expressions together, picking the first valued optional expression`),
|
||||
MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`optional.none().or(optional.of(1)) // optional.of(1)`,
|
||||
common.MultilineDescription(
|
||||
`// either a value from the first list, a value from the second, or optional.none()`,
|
||||
`[1, 2, 3][?x].or([3, 4, 5][?y])`)))),
|
||||
Function("orValue",
|
||||
MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
|
||||
FunctionDocs(`chain optional expressions together picking the first valued optional or the default value`),
|
||||
MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV,
|
||||
OverloadExamples(
|
||||
common.MultilineDescription(
|
||||
`// pick the value for the given key if the key exists, otherwise return 'you'`,
|
||||
`{'hello': 'world', 'goodbye': 'cruel world'}[?greeting].orValue('you')`)))),
|
||||
|
||||
// OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
|
||||
// output type.
|
||||
Function(operators.OptSelect,
|
||||
Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
|
||||
FunctionDocs(`if the field is present create an optional of the field value, otherwise return optional.none()`),
|
||||
Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`msg.?field // optional.of(field) if non-empty, otherwise optional.none()`,
|
||||
`msg.?field.?nested_field // optional.of(nested_field) if both field and nested_field are non-empty.`))),
|
||||
|
||||
// OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
|
||||
// these signatures to determine type-agreement without any special handling.
|
||||
Function(operators.OptIndex,
|
||||
Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
|
||||
FunctionDocs(`if the index is present create an optional of the field value, otherwise return optional.none()`),
|
||||
Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV,
|
||||
OverloadExamples(`[1, 2, 3][?x] // element value if x is in the list size, else optional.none()`)),
|
||||
Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
|
||||
Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
|
||||
Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`map_value[?key] // value at the key if present, else optional.none()`,
|
||||
common.MultilineDescription(
|
||||
`// map key-value if index is a valid map key, else optional.none()`,
|
||||
`{0: 2, 2: 4, 6: 8}[?index]`))),
|
||||
Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
|
||||
|
||||
// Index overloads to accommodate using an optional value as the operand.
|
||||
@@ -408,45 +526,62 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
|
||||
Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
|
||||
}
|
||||
if lib.version >= 1 {
|
||||
opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
|
||||
opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap,
|
||||
MacroDocs(`perform computation on the value if present and produce an optional value within the computation`),
|
||||
MacroExamples(
|
||||
common.MultilineDescription(
|
||||
`// m = {'key': {}}`,
|
||||
`m.?key.optFlatMap(k, k.?subkey) // optional.none()`),
|
||||
common.MultilineDescription(
|
||||
`// m = {'key': {'subkey': 'value'}}`,
|
||||
`m.?key.optFlatMap(k, k.?subkey) // optional.of('value')`),
|
||||
))))
|
||||
}
|
||||
|
||||
if lib.version >= 2 {
|
||||
opts = append(opts, Function("last",
|
||||
FunctionDocs(`return the last value in a list if present, otherwise optional.none()`),
|
||||
MemberOverload("list_last", []*Type{listTypeV}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`[].last() // optional.none()`,
|
||||
`[1, 2, 3].last() // optional.of(3)`),
|
||||
UnaryBinding(func(v ref.Val) ref.Val {
|
||||
list := v.(traits.Lister)
|
||||
sz := list.Size().Value().(int64)
|
||||
|
||||
if sz == 0 {
|
||||
sz := list.Size().(types.Int)
|
||||
if sz == types.IntZero {
|
||||
return types.OptionalNone
|
||||
}
|
||||
|
||||
return types.OptionalOf(list.Get(types.Int(sz - 1)))
|
||||
}),
|
||||
),
|
||||
))
|
||||
|
||||
opts = append(opts, Function("first",
|
||||
FunctionDocs(`return the first value in a list if present, otherwise optional.none()`),
|
||||
MemberOverload("list_first", []*Type{listTypeV}, optionalTypeV,
|
||||
OverloadExamples(
|
||||
`[].first() // optional.none()`,
|
||||
`[1, 2, 3].first() // optional.of(1)`),
|
||||
UnaryBinding(func(v ref.Val) ref.Val {
|
||||
list := v.(traits.Lister)
|
||||
sz := list.Size().Value().(int64)
|
||||
|
||||
if sz == 0 {
|
||||
sz := list.Size().(types.Int)
|
||||
if sz == types.IntZero {
|
||||
return types.OptionalNone
|
||||
}
|
||||
|
||||
return types.OptionalOf(list.Get(types.Int(0)))
|
||||
}),
|
||||
),
|
||||
))
|
||||
|
||||
opts = append(opts, Function(optionalUnwrapFunc,
|
||||
FunctionDocs(`convert a list of optional values to a list containing only values which are not optional.none()`),
|
||||
Overload("optional_unwrap", []*Type{listOptionalTypeV}, listTypeV,
|
||||
OverloadExamples(`optional.unwrap([optional.of(1), optional.none()]) // [1]`),
|
||||
UnaryBinding(optUnwrap))))
|
||||
opts = append(opts, Function(unwrapOptFunc,
|
||||
FunctionDocs(`convert a list of optional values to a list containing only values which are not optional.none()`),
|
||||
MemberOverload("optional_unwrapOpt", []*Type{listOptionalTypeV}, listTypeV,
|
||||
OverloadExamples(`[optional.of(1), optional.none()].unwrapOpt() // [1]`),
|
||||
UnaryBinding(optUnwrap))))
|
||||
}
|
||||
|
||||
@@ -460,6 +595,11 @@ func (lib *optionalLib) ProgramOptions() []ProgramOption {
|
||||
}
|
||||
}
|
||||
|
||||
// Version returns the current version of the library.
|
||||
func (lib *optionalLib) Version() uint32 {
|
||||
return lib.version
|
||||
}
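A hedged end-to-end sketch of the optional-types feature documented above, using optional field selection with an orValue default (fragment; assumes the `github.com/google/cel-go/cel` and `fmt` imports):

env, err := cel.NewEnv(
	cel.OptionalTypes(),
	cel.Variable("request", cel.MapType(cel.StringType, cel.DynType)),
)
if err != nil {
	panic(err)
}
checked, iss := env.Compile(`request.?user.orValue("anonymous")`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(checked)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(map[string]any{"request": map[string]any{}})
if err != nil {
	panic(err)
}
// The 'user' key is absent, so the optional select yields optional.none()
// and orValue falls back to the default.
fmt.Println(out) // anonymous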
|
||||
|
||||
func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
|
||||
varIdent := args[0]
|
||||
varName := ""
|
||||
@@ -633,250 +773,99 @@ func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
|
||||
return opt.rhs.Eval(ctx)
|
||||
}
|
||||
|
||||
type timeUTCLibrary struct{}
|
||||
type timeLegacyLibrary struct{}
|
||||
|
||||
func (timeUTCLibrary) CompileOptions() []EnvOption {
|
||||
func (timeLegacyLibrary) CompileOptions() []EnvOption {
|
||||
return timeOverloadDeclarations
|
||||
}
|
||||
|
||||
func (timeUTCLibrary) ProgramOptions() []ProgramOption {
|
||||
func (timeLegacyLibrary) ProgramOptions() []ProgramOption {
|
||||
return []ProgramOption{}
|
||||
}
|
||||
|
||||
// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
|
||||
// in the CEL expression.
|
||||
var (
|
||||
utcTZ = types.String("UTC")
|
||||
|
||||
timeOverloadDeclarations = []EnvOption{
|
||||
Function(overloads.TimeGetHours,
|
||||
MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
|
||||
UnaryBinding(types.DurationGetHours))),
|
||||
Function(overloads.TimeGetMinutes,
|
||||
MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
|
||||
UnaryBinding(types.DurationGetMinutes))),
|
||||
Function(overloads.TimeGetSeconds,
|
||||
MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
|
||||
UnaryBinding(types.DurationGetSeconds))),
|
||||
Function(overloads.TimeGetMilliseconds,
|
||||
MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
|
||||
UnaryBinding(types.DurationGetMilliseconds))),
|
||||
Function(overloads.TimeGetFullYear,
|
||||
MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetFullYear(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetFullYear, overloads.TimestampToYear, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetFullYear),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetMonth,
|
||||
MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetMonth(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetMonth, overloads.TimestampToMonth, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetMonth),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetDayOfYear,
|
||||
MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetDayOfYear(ts, utcTZ)
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(func(ts, tz ref.Val) ref.Val {
|
||||
return timestampGetDayOfYear(ts, tz)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetDayOfYear, overloads.TimestampToDayOfYear, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetDayOfMonth,
|
||||
MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetDayOfMonthZeroBased(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetDayOfMonth, overloads.TimestampToDayOfMonthZeroBased, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetDayOfMonthZeroBased),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetDate,
|
||||
MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetDayOfMonthOneBased(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetDate, overloads.TimestampToDayOfMonthOneBased, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetDayOfMonthOneBased),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetDayOfWeek,
|
||||
MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetDayOfWeek(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetDayOfWeek, overloads.TimestampToDayOfWeek, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetDayOfWeek),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetHours,
|
||||
MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetHours(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetHours, overloads.TimestampToHours, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetHours),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetMinutes,
|
||||
MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetMinutes(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetMinutes, overloads.TimestampToMinutes, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetMinutes),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetSeconds,
|
||||
MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetSeconds(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetSeconds, overloads.TimestampToSeconds, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetSeconds),
|
||||
),
|
||||
),
|
||||
Function(overloads.TimeGetMilliseconds,
|
||||
MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
|
||||
UnaryBinding(func(ts ref.Val) ref.Val {
|
||||
return timestampGetMilliseconds(ts, utcTZ)
|
||||
t := ts.(types.Timestamp)
|
||||
return t.Receive(overloads.TimeGetMilliseconds, overloads.TimestampToMilliseconds, []ref.Val{})
|
||||
}),
|
||||
),
|
||||
MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
|
||||
BinaryBinding(timestampGetMilliseconds),
|
||||
),
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
func timestampGetFullYear(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Year())
|
||||
}
|
||||
|
||||
func timestampGetMonth(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
// CEL spec indicates that the month should be 0-based, but the Time value
|
||||
// for Month() is 1-based.
|
||||
return types.Int(t.Month() - 1)
|
||||
}
|
||||
|
||||
func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.YearDay() - 1)
|
||||
}
|
||||
|
||||
func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Day() - 1)
|
||||
}
|
||||
|
||||
func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Day())
|
||||
}
|
||||
|
||||
func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Weekday())
|
||||
}
|
||||
|
||||
func timestampGetHours(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Hour())
|
||||
}
|
||||
|
||||
func timestampGetMinutes(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Minute())
|
||||
}
|
||||
|
||||
func timestampGetSeconds(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Second())
|
||||
}
|
||||
|
||||
func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
|
||||
t, err := inTimeZone(ts, tz)
|
||||
if err != nil {
|
||||
return types.NewErrFromString(err.Error())
|
||||
}
|
||||
return types.Int(t.Nanosecond() / 1000000)
|
||||
}
|
||||
|
||||
func inTimeZone(ts, tz ref.Val) (time.Time, error) {
|
||||
t := ts.(types.Timestamp)
|
||||
val := string(tz.(types.String))
|
||||
ind := strings.Index(val, ":")
|
||||
if ind == -1 {
|
||||
loc, err := time.LoadLocation(val)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return t.In(loc), nil
|
||||
}
|
||||
|
||||
// If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
|
||||
// in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
|
||||
hr, err := strconv.Atoi(string(val[0:ind]))
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
min, err := strconv.Atoi(string(val[ind+1:]))
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
var offset int
|
||||
if string(val[0]) == "-" {
|
||||
offset = hr*60 - min
|
||||
} else {
|
||||
offset = hr*60 + min
|
||||
}
|
||||
secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
|
||||
timezone := time.FixedZone("", secondsEastOfUTC)
|
||||
return t.In(timezone), nil
|
||||
}
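A hedged sketch of the two timezone forms accepted above, a named location keyword and a fixed numeric offset (fragment; assumes the `github.com/google/cel-go/cel` import):

env, err := cel.NewEnv()
if err != nil {
	panic(err)
}
checked, iss := env.Compile(`
	timestamp('2023-01-01T10:00:00Z').getHours('+02:00') == 12 &&
	timestamp('2023-01-01T10:00:00Z').getHours('UTC') == 10`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(checked)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars()) // out == true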
|
||||
|
||||
30
vendor/github.com/google/cel-go/cel/macro.go
generated
vendored
@@ -142,24 +142,38 @@ type MacroExprHelper interface {
|
||||
NewError(exprID int64, message string) *Error
|
||||
}
|
||||
|
||||
// MacroOpt defines a functional option for configuring macro behavior.
|
||||
type MacroOpt = parser.MacroOpt
|
||||
|
||||
// MacroDocs configures a list of strings into a multiline description for the macro.
|
||||
func MacroDocs(docs ...string) MacroOpt {
|
||||
return parser.MacroDocs(docs...)
|
||||
}
|
||||
|
||||
// MacroExamples configures a list of examples, either as a string or common.MultilineString,
|
||||
// into an example set to be provided with the macro Documentation() call.
|
||||
func MacroExamples(examples ...string) MacroOpt {
|
||||
return parser.MacroExamples(examples...)
|
||||
}
|
||||
|
||||
// GlobalMacro creates a Macro for a global function with the specified arg count.
|
||||
func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
|
||||
return parser.NewGlobalMacro(function, argCount, factory)
|
||||
func GlobalMacro(function string, argCount int, factory MacroFactory, opts ...MacroOpt) Macro {
|
||||
return parser.NewGlobalMacro(function, argCount, factory, opts...)
|
||||
}
|
||||
|
||||
// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
|
||||
func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
|
||||
return parser.NewReceiverMacro(function, argCount, factory)
|
||||
func ReceiverMacro(function string, argCount int, factory MacroFactory, opts ...MacroOpt) Macro {
|
||||
return parser.NewReceiverMacro(function, argCount, factory, opts...)
|
||||
}
|
||||
|
||||
// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
|
||||
func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
|
||||
return parser.NewGlobalVarArgMacro(function, factory)
|
||||
func GlobalVarArgMacro(function string, factory MacroFactory, opts ...MacroOpt) Macro {
|
||||
return parser.NewGlobalVarArgMacro(function, factory, opts...)
|
||||
}
|
||||
|
||||
// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
|
||||
func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
|
||||
return parser.NewReceiverVarArgMacro(function, factory)
|
||||
func ReceiverVarArgMacro(function string, factory MacroFactory, opts ...MacroOpt) Macro {
|
||||
return parser.NewReceiverVarArgMacro(function, factory, opts...)
|
||||
}
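A hedged sketch of the extended signatures: a custom receiver macro with documentation attached. The macro name 'pick' and its trivial expansion are hypothetical, used only to illustrate the MacroDocs and MacroExamples options (fragment; assumes the cel and common/ast packages are imported):

// Hypothetical expansion: msg.pick(a, b) simply rewrites to its first argument.
expandPick := func(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
	return args[0], nil
}
pickMacro := cel.ReceiverMacro("pick", 2, expandPick,
	cel.MacroDocs(`select the preferred of two values (illustrative only)`),
	cel.MacroExamples(`msg.pick(a, b) // hypothetical`))
env, err := cel.NewEnv(cel.Macros(pickMacro))
if err != nil {
	panic(err)
}
_ = env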
|
||||
|
||||
// NewGlobalMacro creates a Macro for a global function with the specified arg count.
|
||||
|
||||
227
vendor/github.com/google/cel-go/cel/options.go
generated
vendored
@@ -15,6 +15,7 @@
|
||||
package cel
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -25,6 +26,8 @@ import (
|
||||
|
||||
"github.com/google/cel-go/checker"
|
||||
"github.com/google/cel-go/common/containers"
|
||||
"github.com/google/cel-go/common/decls"
|
||||
"github.com/google/cel-go/common/env"
|
||||
"github.com/google/cel-go/common/functions"
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/pb"
|
||||
@@ -70,6 +73,26 @@ const (
|
||||
featureIdentEscapeSyntax
|
||||
)
|
||||
|
||||
var featureIDsToNames = map[int]string{
|
||||
featureEnableMacroCallTracking: "cel.feature.macro_call_tracking",
|
||||
featureCrossTypeNumericComparisons: "cel.feature.cross_type_numeric_comparisons",
|
||||
featureIdentEscapeSyntax: "cel.feature.backtick_escape_syntax",
|
||||
}
|
||||
|
||||
func featureNameByID(id int) (string, bool) {
|
||||
name, found := featureIDsToNames[id]
|
||||
return name, found
|
||||
}
|
||||
|
||||
func featureIDByName(name string) (int, bool) {
|
||||
for id, n := range featureIDsToNames {
|
||||
if n == name {
|
||||
return id, true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// EnvOption is a functional interface for configuring the environment.
|
||||
type EnvOption func(e *Env) (*Env, error)
|
||||
|
||||
@@ -112,6 +135,8 @@ func CustomTypeProvider(provider any) EnvOption {
|
||||
// Note: Declarations will by default be appended to the pre-existing declaration set configured
|
||||
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
|
||||
// purely custom set of declarations use NewCustomEnv.
|
||||
//
|
||||
// Deprecated: use FunctionDecls and VariableDecls or FromConfig instead.
|
||||
func Declarations(decls ...*exprpb.Decl) EnvOption {
|
||||
declOpts := []EnvOption{}
|
||||
var err error
|
||||
@@ -379,7 +404,7 @@ type ProgramOption func(p *prog) (*prog, error)
|
||||
// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
|
||||
func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
|
||||
return func(p *prog) (*prog, error) {
|
||||
p.decorators = append(p.decorators, dec)
|
||||
p.plannerOptions = append(p.plannerOptions, interpreter.CustomDecorator(dec))
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
@@ -401,10 +426,10 @@ func Functions(funcs ...*functions.Overload) ProgramOption {
|
||||
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
|
||||
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
|
||||
// The vars value may either be a `cel.Activation` instance or a `map[string]any`.
|
||||
func Globals(vars any) ProgramOption {
|
||||
return func(p *prog) (*prog, error) {
|
||||
defaultVars, err := interpreter.NewActivation(vars)
|
||||
defaultVars, err := NewActivation(vars)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -426,6 +451,174 @@ func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) Program
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigOptionFactory declares a signature which accepts a configuration element, e.g. env.Extension
|
||||
// and optionally produces an EnvOption in response.
|
||||
//
|
||||
// If there are multiple ConfigOptionFactory values which could apply to the same configuration node
|
||||
// the first one that returns an EnvOption and a `true` response will be used, and the config node
|
||||
// will not be passed along to any other option factory.
|
||||
//
|
||||
// Only the *env.Extension type is provided at this time, but validators, optimizers, and other tuning
|
||||
// parameters may be supported in the future.
|
||||
type ConfigOptionFactory func(any) (EnvOption, bool)
|
||||
|
||||
// FromConfig produces and applies a set of EnvOption values derived from an env.Config object.
|
||||
//
|
||||
// For configuration elements which refer to features outside of the `cel` package, an optional set of
|
||||
// ConfigOptionFactory values may be passed in to support the conversion from static configuration to
|
||||
// configured cel.Env value.
|
||||
//
|
||||
// Note: disabling the standard library will clear the EnvOptions values previously set for the
|
||||
// environment with the exception of propagating types and adapters over to the new environment.
|
||||
//
|
||||
// Note: to support custom types referenced in the configuration file, you must ensure that one of
|
||||
// the following options appears before the FromConfig option: Types, TypeDescs, or CustomTypeProvider
|
||||
// as the type provider configured at the time when the config is processed is the one used to derive
|
||||
// type references from the configuration.
|
||||
func FromConfig(config *env.Config, optFactories ...ConfigOptionFactory) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts, err := configToEnvOptions(config, e.CELTypeProvider(), optFactories)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
e, err = o(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
}
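A hedged sketch of applying a static configuration; only the Container field is set here, assuming such a minimal config validates cleanly, and the full env.Config schema lives in the vendored common/env package (fragment; assumes the cel and common/env imports):

cfg := &env.Config{Container: "example.container"}
e, err := cel.NewEnv(cel.FromConfig(cfg))
if err != nil {
	panic(err)
}
// Configuration elements outside the cel package would be resolved by passing
// ConfigOptionFactory values as additional arguments to FromConfig.
_ = e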
|
||||
|
||||
// configToEnvOptions generates a set of EnvOption values (or error) based on a config, a type provider,
|
||||
// and an optional set of environment options.
|
||||
func configToEnvOptions(config *env.Config, provider types.Provider, optFactories []ConfigOptionFactory) ([]EnvOption, error) {
|
||||
envOpts := []EnvOption{}
|
||||
// Configure the standard lib subset.
|
||||
if config.StdLib != nil {
|
||||
envOpts = append(envOpts, func(e *Env) (*Env, error) {
|
||||
if e.HasLibrary("cel.lib.std") {
|
||||
return nil, errors.New("invalid subset of stdlib: create a custom env")
|
||||
}
|
||||
return e, nil
|
||||
})
|
||||
if !config.StdLib.Disabled {
|
||||
envOpts = append(envOpts, StdLib(StdLibSubset(config.StdLib)))
|
||||
}
|
||||
} else {
|
||||
envOpts = append(envOpts, StdLib())
|
||||
}
|
||||
|
||||
// Configure the container
|
||||
if config.Container != "" {
|
||||
envOpts = append(envOpts, Container(config.Container))
|
||||
}
|
||||
|
||||
// Configure abbreviations
|
||||
for _, imp := range config.Imports {
|
||||
envOpts = append(envOpts, Abbrevs(imp.Name))
|
||||
}
|
||||
|
||||
// Configure the context variable declaration
|
||||
if config.ContextVariable != nil {
|
||||
typeName := config.ContextVariable.TypeName
|
||||
if _, found := provider.FindStructType(typeName); !found {
|
||||
return nil, fmt.Errorf("invalid context proto type: %q", typeName)
|
||||
}
|
||||
// Attempt to instantiate the proto in order to obtain its descriptor
|
||||
msg := provider.NewValue(typeName, map[string]ref.Val{})
|
||||
pbMsg, ok := msg.Value().(proto.Message)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported context type: %T", msg.Value())
|
||||
}
|
||||
envOpts = append(envOpts, DeclareContextProto(pbMsg.ProtoReflect().Descriptor()))
|
||||
}
|
||||
|
||||
// Configure variables
|
||||
if len(config.Variables) != 0 {
|
||||
vars := make([]*decls.VariableDecl, 0, len(config.Variables))
|
||||
for _, v := range config.Variables {
|
||||
vDef, err := v.AsCELVariable(provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vars = append(vars, vDef)
|
||||
}
|
||||
envOpts = append(envOpts, VariableDecls(vars...))
|
||||
}
|
||||
|
||||
// Configure functions
|
||||
if len(config.Functions) != 0 {
|
||||
funcs := make([]*decls.FunctionDecl, 0, len(config.Functions))
|
||||
for _, f := range config.Functions {
|
||||
fnDef, err := f.AsCELFunction(provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
funcs = append(funcs, fnDef)
|
||||
}
|
||||
envOpts = append(envOpts, FunctionDecls(funcs...))
|
||||
}
|
||||
|
||||
// Configure features
|
||||
for _, feat := range config.Features {
|
||||
// Note, if a feature is not found, it is skipped as it is possible the feature
|
||||
// is not intended to be supported publicly. In the future, a refinement of
// this strategy to report unrecognized features and validators should
// probably be handled by a standard ConfigOptionFactory.
|
||||
if id, found := featureIDByName(feat.Name); found {
|
||||
envOpts = append(envOpts, features(id, feat.Enabled))
|
||||
}
|
||||
}
|
||||
|
||||
// Configure validators
|
||||
for _, val := range config.Validators {
|
||||
if fac, found := astValidatorFactories[val.Name]; found {
|
||||
envOpts = append(envOpts, func(e *Env) (*Env, error) {
|
||||
validator, err := fac(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
return ASTValidators(validator)(e)
|
||||
})
|
||||
} else if opt, handled := handleExtendedConfigOption(val, optFactories); handled {
|
||||
envOpts = append(envOpts, opt)
|
||||
}
|
||||
// we don't error when the validator isn't found as it may be part
|
||||
// of an extension library and enabled implicitly.
|
||||
}
|
||||
|
||||
// Configure extensions
|
||||
for _, ext := range config.Extensions {
|
||||
// version number has been validated by the call to `Validate`
|
||||
ver, _ := ext.VersionNumber()
|
||||
if ext.Name == "optional" {
|
||||
envOpts = append(envOpts, OptionalTypes(OptionalTypesVersion(ver)))
|
||||
} else {
|
||||
opt, handled := handleExtendedConfigOption(ext, optFactories)
|
||||
if !handled {
|
||||
return nil, fmt.Errorf("unrecognized extension: %s", ext.Name)
|
||||
}
|
||||
envOpts = append(envOpts, opt)
|
||||
}
|
||||
}
|
||||
|
||||
return envOpts, nil
|
||||
}
|
||||
|
||||
func handleExtendedConfigOption(conf any, optFactories []ConfigOptionFactory) (EnvOption, bool) {
|
||||
for _, optFac := range optFactories {
|
||||
if opt, useOption := optFac(conf); useOption {
|
||||
return opt, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
|
||||
// in the output result.
|
||||
type EvalOption int
|
||||
@@ -534,7 +727,7 @@ func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) {
|
||||
return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
|
||||
}
|
||||
|
||||
func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
|
||||
func fieldToVariable(field protoreflect.FieldDescriptor) (*decls.VariableDecl, error) {
|
||||
name := string(field.Name())
|
||||
if field.IsMap() {
|
||||
mapKey := field.MapKey()
|
||||
@@ -547,20 +740,20 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Variable(name, MapType(keyType, valueType)), nil
|
||||
return decls.NewVariable(name, MapType(keyType, valueType)), nil
|
||||
}
|
||||
if field.IsList() {
|
||||
elemType, err := fieldToCELType(field)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Variable(name, ListType(elemType)), nil
|
||||
return decls.NewVariable(name, ListType(elemType)), nil
|
||||
}
|
||||
celType, err := fieldToCELType(field)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Variable(name, celType), nil
|
||||
return decls.NewVariable(name, celType), nil
|
||||
}
|
||||
|
||||
// DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto.
|
||||
@@ -568,17 +761,25 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
|
||||
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
|
||||
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
if e.contextProto != nil {
|
||||
return nil, fmt.Errorf("context proto already declared as %q, got %q",
|
||||
e.contextProto.FullName(), descriptor.FullName())
|
||||
}
|
||||
e.contextProto = descriptor
|
||||
fields := descriptor.Fields()
|
||||
vars := make([]*decls.VariableDecl, 0, fields.Len())
|
||||
for i := 0; i < fields.Len(); i++ {
|
||||
field := fields.Get(i)
|
||||
variable, err := fieldToVariable(field)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e, err = variable(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vars = append(vars, variable)
|
||||
}
|
||||
var err error
|
||||
e, err = VariableDecls(vars...)(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Types(dynamicpb.NewMessage(descriptor))(e)
|
||||
}
|
||||
@@ -588,7 +789,7 @@ func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
|
||||
//
|
||||
// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using
|
||||
// protocol buffers.
|
||||
func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
|
||||
func ContextProtoVars(ctx proto.Message) (Activation, error) {
|
||||
if ctx == nil || !ctx.ProtoReflect().IsValid() {
|
||||
return interpreter.EmptyActivation(), nil
|
||||
}
|
||||
@@ -612,7 +813,7 @@ func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
|
||||
}
|
||||
vars[field.TextName()] = fieldVal
|
||||
}
|
||||
return interpreter.NewActivation(vars)
|
||||
return NewActivation(vars)
|
||||
}
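A hedged sketch combining DeclareContextProto with ContextProtoVars; 'examplepb.Request' is a hypothetical generated proto type with a 'name' field, used only to show the shape of the calls (fragment; assumes the cel import and the hypothetical proto package):

env, err := cel.NewEnv(
	cel.DeclareContextProto((&examplepb.Request{}).ProtoReflect().Descriptor()),
)
if err != nil {
	panic(err)
}
checked, iss := env.Compile(`name == 'test'`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(checked)
if err != nil {
	panic(err)
}
// Publish the proto fields as top-level variables for evaluation.
vars, err := cel.ContextProtoVars(&examplepb.Request{Name: "test"})
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(vars) // out == true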
|
||||
|
||||
// EnableMacroCallTracking ensures that call expressions which are replaced by macros
|
||||
|
||||
274
vendor/github.com/google/cel-go/cel/program.go
generated
vendored
@@ -29,7 +29,7 @@ import (
|
||||
type Program interface {
|
||||
// Eval returns the result of an evaluation of the Ast and environment against the input vars.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` or a `map[string]any`.
|
||||
// The vars value may either be an `Activation` or a `map[string]any`.
|
||||
//
|
||||
// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
|
||||
// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
|
||||
@@ -47,14 +47,39 @@ type Program interface {
|
||||
// to support cancellation and timeouts. This method must be used in conjunction with the
|
||||
// InterruptCheckFrequency() option for cancellation interrupts to impact evaluation.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` or `map[string]any`.
|
||||
// The vars value may either be an `Activation` or `map[string]any`.
|
||||
//
|
||||
// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
|
||||
ContextEval(context.Context, any) (ref.Val, *EvalDetails, error)
|
||||
}
|
||||
|
||||
// Activation used to resolve identifiers by name and references by id.
|
||||
//
|
||||
// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
|
||||
type Activation = interpreter.Activation
|
||||
|
||||
// NewActivation returns an activation based on a map-based binding where the map keys are
|
||||
// expected to be qualified names used with ResolveName calls.
|
||||
//
|
||||
// The input `bindings` may either be of type `Activation` or `map[string]any`.
|
||||
//
|
||||
// Lazy bindings may be supplied within the map-based input in either of the following forms:
|
||||
// - func() any
|
||||
// - func() ref.Val
|
||||
//
|
||||
// The output of the lazy binding will overwrite the variable reference in the internal map.
|
||||
//
|
||||
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
|
||||
// the types.Adapter configured in the environment.
|
||||
func NewActivation(bindings any) (Activation, error) {
|
||||
return interpreter.NewActivation(bindings)
|
||||
}
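A hedged sketch of the lazy-binding behavior documented above (fragment; assumes the `github.com/google/cel-go/cel` import):

env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
if err != nil {
	panic(err)
}
checked, iss := env.Compile(`'hello ' + name`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(checked)
if err != nil {
	panic(err)
}
vars, err := cel.NewActivation(map[string]any{
	// Lazy binding: resolved on first access, after which the computed value
	// overwrites the function reference in the internal map.
	"name": func() any { return "world" },
})
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(vars) // out == "hello world"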
|
||||
|
||||
// PartialActivation extends the Activation interface with a set of unknown AttributePatterns.
|
||||
type PartialActivation = interpreter.PartialActivation
|
||||
|
||||
// NoVars returns an empty Activation.
|
||||
func NoVars() interpreter.Activation {
|
||||
func NoVars() Activation {
|
||||
return interpreter.EmptyActivation()
|
||||
}
|
||||
|
||||
@@ -64,10 +89,9 @@ func NoVars() interpreter.Activation {
|
||||
// This method relies on manually configured sets of missing attribute patterns. For a method which
|
||||
// infers the missing variables from the input and the configured environment, use Env.PartialVars().
|
||||
//
|
||||
// The `vars` value may either be an interpreter.Activation or any valid input to the
|
||||
// interpreter.NewActivation call.
|
||||
// The `vars` value may either be an Activation or any valid input to the NewActivation call.
|
||||
func PartialVars(vars any,
|
||||
unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
|
||||
unknowns ...*AttributePatternType) (PartialActivation, error) {
|
||||
return interpreter.NewPartialActivation(vars, unknowns...)
|
||||
}
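A hedged sketch of partial evaluation with an attribute pattern, assuming the OptPartialEval flag behaves as in prior cel-go releases (fragment; assumes the cel, common/types, and fmt imports):

env, err := cel.NewEnv(cel.Variable("request", cel.DynType))
if err != nil {
	panic(err)
}
checked, iss := env.Compile(`request.size() > 0`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(checked, cel.EvalOptions(cel.OptPartialEval))
if err != nil {
	panic(err)
}
// Mark 'request' as unknown rather than supplying a value.
vars, err := cel.PartialVars(map[string]any{}, cel.AttributePattern("request"))
if err != nil {
	panic(err)
}
out, _, _ := prg.Eval(vars)
// With the variable unknown, the result is expected to be an unknown set
// rather than an error.
fmt.Println(types.IsUnknown(out))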
|
||||
|
||||
@@ -84,12 +108,15 @@ func PartialVars(vars any,
|
||||
// fully qualified variable name may be `ns.app.a`, `ns.a`, or `a` per the CEL namespace resolution
|
||||
// rules. Pick the fully qualified variable name that makes sense within the container as the
|
||||
// AttributePattern `varName` argument.
|
||||
func AttributePattern(varName string) *AttributePatternType {
|
||||
return interpreter.NewAttributePattern(varName)
|
||||
}
|
||||
|
||||
// AttributePatternType represents a top-level variable with an optional set of qualifier patterns.
|
||||
//
|
||||
// See the interpreter.AttributePattern and interpreter.AttributeQualifierPattern for more info
|
||||
// about how to create and manipulate AttributePattern values.
|
||||
func AttributePattern(varName string) *interpreter.AttributePattern {
|
||||
return interpreter.NewAttributePattern(varName)
|
||||
}
|
||||
type AttributePatternType = interpreter.AttributePattern
|
||||
|
||||
// EvalDetails holds additional information observed during the Eval() call.
|
||||
type EvalDetails struct {
|
||||
@@ -120,37 +147,24 @@ func (ed *EvalDetails) ActualCost() *uint64 {
|
||||
type prog struct {
|
||||
*Env
|
||||
evalOpts EvalOption
|
||||
defaultVars interpreter.Activation
|
||||
defaultVars Activation
|
||||
dispatcher interpreter.Dispatcher
|
||||
interpreter interpreter.Interpreter
|
||||
interruptCheckFrequency uint
|
||||
|
||||
// Intermediate state used to configure the InterpretableDecorator set provided
|
||||
// to the initInterpretable call.
|
||||
decorators []interpreter.InterpretableDecorator
|
||||
plannerOptions []interpreter.PlannerOption
|
||||
regexOptimizations []*interpreter.RegexOptimization
|
||||
|
||||
// Interpretable configured from an Ast and aggregate decorator set based on program options.
|
||||
interpretable interpreter.Interpretable
|
||||
observable *interpreter.ObservableInterpretable
|
||||
callCostEstimator interpreter.ActualCostEstimator
|
||||
costOptions []interpreter.CostTrackerOption
|
||||
costLimit *uint64
|
||||
}
|
||||
|
||||
func (p *prog) clone() *prog {
|
||||
costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions))
|
||||
copy(costOptsCopy, p.costOptions)
|
||||
|
||||
return &prog{
|
||||
Env: p.Env,
|
||||
evalOpts: p.evalOpts,
|
||||
defaultVars: p.defaultVars,
|
||||
dispatcher: p.dispatcher,
|
||||
interpreter: p.interpreter,
|
||||
interruptCheckFrequency: p.interruptCheckFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
// newProgram creates a program instance with an environment, an ast, and an optional list of
|
||||
// ProgramOption values.
|
||||
//
|
||||
@@ -162,10 +176,10 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) {
|
||||
// Ensure the default attribute factory is set after the adapter and provider are
|
||||
// configured.
|
||||
p := &prog{
|
||||
Env: e,
|
||||
decorators: []interpreter.InterpretableDecorator{},
|
||||
dispatcher: disp,
|
||||
costOptions: []interpreter.CostTrackerOption{},
|
||||
Env: e,
|
||||
plannerOptions: []interpreter.PlannerOption{},
|
||||
dispatcher: disp,
|
||||
costOptions: []interpreter.CostTrackerOption{},
|
||||
}
|
||||
|
||||
// Configure the program via the ProgramOption values.
|
||||
@@ -203,74 +217,71 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) {
|
||||
p.interpreter = interp
|
||||
|
||||
// Translate the EvalOption flags into InterpretableDecorator instances.
|
||||
decorators := make([]interpreter.InterpretableDecorator, len(p.decorators))
|
||||
copy(decorators, p.decorators)
|
||||
plannerOptions := make([]interpreter.PlannerOption, len(p.plannerOptions))
|
copy(plannerOptions, p.plannerOptions)

// Enable interrupt checking if there's a non-zero check frequency
if p.interruptCheckFrequency > 0 {
decorators = append(decorators, interpreter.InterruptableEval())
plannerOptions = append(plannerOptions, interpreter.InterruptableEval())
}
// Enable constant folding first.
if p.evalOpts&OptOptimize == OptOptimize {
decorators = append(decorators, interpreter.Optimize())
plannerOptions = append(plannerOptions, interpreter.Optimize())
p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
}
// Enable regex compilation of constants immediately after folding constants.
if len(p.regexOptimizations) > 0 {
decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
plannerOptions = append(plannerOptions, interpreter.CompileRegexConstants(p.regexOptimizations...))
}

// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
costTracker.Estimator = p.callCostEstimator
costTracker.Limit = p.costLimit
for _, costOpt := range p.costOptions {
err := costOpt(costTracker)
if err != nil {
return nil, err
}
}
// Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
// prevents the underlying memory from being shared between factory function calls causing
// undesired mutations.
decs := decorators[:len(decorators):len(decorators)]
var observers []interpreter.EvalObserver

if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
// EvalStateObserver is required for OptExhaustiveEval.
observers = append(observers, interpreter.EvalStateObserver(state))
}
if p.evalOpts&OptTrackCost == OptTrackCost {
observers = append(observers, interpreter.CostObserver(costTracker))
}

// Enable exhaustive eval over a basic observer since it offers a superset of features.
if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
} else if len(observers) > 0 {
decs = append(decs, interpreter.Observe(observers...))
}

return p.clone().initInterpretable(a, decs)
costOptCount := len(p.costOptions)
if p.costLimit != nil {
costOptCount++
}
costOpts := make([]interpreter.CostTrackerOption, 0, costOptCount)
costOpts = append(costOpts, p.costOptions...)
if p.costLimit != nil {
costOpts = append(costOpts, interpreter.CostTrackerLimit(*p.costLimit))
}
trackerFactory := func() (*interpreter.CostTracker, error) {
return interpreter.NewCostTracker(p.callCostEstimator, costOpts...)
}
var observers []interpreter.PlannerOption
if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
// EvalStateObserver is required for OptExhaustiveEval.
observers = append(observers, interpreter.EvalStateObserver())
}
if p.evalOpts&OptTrackCost == OptTrackCost {
observers = append(observers, interpreter.CostObserver(interpreter.CostTrackerFactory(trackerFactory)))
}
// Enable exhaustive eval over a basic observer since it offers a superset of features.
if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
plannerOptions = append(plannerOptions,
append([]interpreter.PlannerOption{interpreter.ExhaustiveEval()}, observers...)...)
} else if len(observers) > 0 {
plannerOptions = append(plannerOptions, observers...)
}
return newProgGen(factory)
}
return p.initInterpretable(a, decorators)
return p.initInterpretable(a, plannerOptions)
}

func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) {
func (p *prog) initInterpretable(a *ast.AST, plannerOptions []interpreter.PlannerOption) (*prog, error) {
// When the AST has been exprAST it contains metadata that can be used to speed up program execution.
interpretable, err := p.interpreter.NewInterpretable(a, decs...)
interpretable, err := p.interpreter.NewInterpretable(a, plannerOptions...)
if err != nil {
return nil, err
}
p.interpretable = interpretable
if oi, ok := interpretable.(*interpreter.ObservableInterpretable); ok {
p.observable = oi
}
return p, nil
}

// Eval implements the Program interface method.
func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
func (p *prog) Eval(input any) (out ref.Val, det *EvalDetails, err error) {
// Configure error recovery for unexpected panics during evaluation. Note, the use of named
// return values makes it possible to modify the error response during the recovery
// function.
@@ -285,9 +296,9 @@ func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
}
}()
// Build a hierarchical activation if there are default vars set.
var vars interpreter.Activation
var vars Activation
switch v := input.(type) {
case interpreter.Activation:
case Activation:
vars = v
case map[string]any:
vars = activationPool.Setup(v)
@@ -298,12 +309,24 @@ func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
if p.defaultVars != nil {
vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
}
v = p.interpretable.Eval(vars)
if p.observable != nil {
det = &EvalDetails{}
out = p.observable.ObserveEval(vars, func(observed any) {
switch o := observed.(type) {
case interpreter.EvalState:
det.state = o
case *interpreter.CostTracker:
det.costTracker = o
}
})
} else {
out = p.interpretable.Eval(vars)
}
// The output of an internal Eval may have a value (`v`) that is a types.Err. This step
// translates the CEL value to a Go error response. This interface does not quite match the
// RPC signature which allows for multiple errors to be returned, but should be sufficient.
if types.IsError(v) {
err = v.(*types.Err)
if types.IsError(out) {
err = out.(*types.Err)
}
return
}
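The hunks above replace cel-go's decorator/factory evaluation path with a planner-option model: when an ObservableInterpretable is produced, Eval routes through ObserveEval and copies the observed EvalState and CostTracker into EvalDetails. Those are internal types; from the caller's side the same machinery is driven by the public EvalOptions. A minimal sketch, assuming an illustrative variable name and expression that are not part of this diff:

package celdemo

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func evalWithCost() error {
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		return err
	}
	ast, iss := env.Compile(`name.startsWith("kube-")`)
	if iss.Err() != nil {
		return iss.Err()
	}
	// OptTrackCost routes evaluation through the observable path shown above, so
	// EvalDetails carries cost information after Eval returns.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackCost))
	if err != nil {
		return err
	}
	out, det, err := prg.Eval(map[string]any{"name": "kube-system"})
	if err != nil {
		return err
	}
	fmt.Println(out)
	if cost := det.ActualCost(); cost != nil {
		fmt.Println("actual cost:", *cost)
	}
	return nil
}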
@@ -315,9 +338,9 @@ func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetail
}
// Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
// exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
var vars interpreter.Activation
var vars Activation
switch v := input.(type) {
case interpreter.Activation:
case Activation:
vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
case map[string]any:
@@ -331,90 +354,8 @@ func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetail
return p.Eval(vars)
}

// progFactory is a helper alias for marking a program creation factory function.
type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)

// progGen holds a reference to a progFactory instance and implements the Program interface.
type progGen struct {
factory progFactory
}

// newProgGen tests the factory object by calling it once and returns a factory-based Program if
// the test is successful.
func newProgGen(factory progFactory) (Program, error) {
// Test the factory to make sure that configuration errors are spotted at config
tracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, err
}
_, err = factory(interpreter.NewEvalState(), tracker)
if err != nil {
return nil, err
}
return &progGen{factory: factory}, nil
}

// Eval implements the Program interface method.
func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
// The factory based Eval() differs from the standard evaluation model in that it generates a
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
state := interpreter.NewEvalState()
costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker}

// Generate a new instance of the interpretable using the factory configured during the call to
// newProgram(). It is incredibly unlikely that the factory call will generate an error given
// the factory test performed within the Program() call.
p, err := gen.factory(state, costTracker)
if err != nil {
return nil, det, err
}

// Evaluate the input, returning the result and the 'state' within EvalDetails.
v, _, err := p.Eval(input)
if err != nil {
return v, det, err
}
return v, det, nil
}

// ContextEval implements the Program interface method.
func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("context can not be nil")
}
// The factory based Eval() differs from the standard evaluation model in that it generates a
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
state := interpreter.NewEvalState()
costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker}

// Generate a new instance of the interpretable using the factory configured during the call to
// newProgram(). It is incredibly unlikely that the factory call will generate an error given
// the factory test performed within the Program() call.
p, err := gen.factory(state, costTracker)
if err != nil {
return nil, det, err
}

// Evaluate the input, returning the result and the 'state' within EvalDetails.
v, _, err := p.ContextEval(ctx, input)
if err != nil {
return v, det, err
}
return v, det, nil
}

type ctxEvalActivation struct {
parent interpreter.Activation
parent Activation
interrupt <-chan struct{}
interruptCheckCount uint
interruptCheckFrequency uint
@@ -438,10 +379,15 @@ func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
return a.parent.ResolveName(name)
}

func (a *ctxEvalActivation) Parent() interpreter.Activation {
func (a *ctxEvalActivation) Parent() Activation {
return a.parent
}

func (a *ctxEvalActivation) AsPartialActivation() (interpreter.PartialActivation, bool) {
pa, ok := a.parent.(interpreter.PartialActivation)
return pa, ok
}

func newCtxEvalActivationPool() *ctxEvalActivationPool {
return &ctxEvalActivationPool{
Pool: sync.Pool{
@@ -457,7 +403,7 @@ type ctxEvalActivationPool struct {
}

// Setup initializes a pooled Activation with the ability check for context.Context cancellation
func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
func (p *ctxEvalActivationPool) Setup(vars Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
a := p.Pool.Get().(*ctxEvalActivation)
a.parent = vars
a.interrupt = done
@@ -506,8 +452,8 @@ func (a *evalActivation) ResolveName(name string) (any, bool) {
}
}

// Parent implements the interpreter.Activation interface
func (a *evalActivation) Parent() interpreter.Activation {
// Parent implements the Activation interface
func (a *evalActivation) Parent() Activation {
return nil
}
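The ContextEval and ctxEvalActivation hunks above keep the rate-limited ctx.Done() polling but move the parent activation to the cel-level Activation alias. A minimal caller-side sketch, assuming env and ast come from a Compile step like the one in the earlier example; the check frequency and deadline are illustrative values:

package celdemo

import (
	"context"
	"time"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types/ref"
)

func evalWithDeadline(env *cel.Env, ast *cel.Ast, vars map[string]any) (ref.Val, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// InterruptCheckFrequency enables the pooled ctxEvalActivation shown above, which
	// checks ctx.Done() once every 100 evaluation steps.
	prg, err := env.Program(ast, cel.InterruptCheckFrequency(100))
	if err != nil {
		return nil, err
	}
	out, _, err := prg.ContextEval(ctx, vars)
	return out, err
}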
155 vendor/github.com/google/cel-go/cel/prompt.go generated vendored Normal file
@@ -0,0 +1,155 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cel

import (
_ "embed"
"sort"
"strings"
"text/template"

"github.com/google/cel-go/common"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
)

//go:embed templates/authoring.tmpl
var authoringPrompt string

// AuthoringPrompt creates a prompt template from a CEL environment for the purpose of AI-assisted authoring.
func AuthoringPrompt(env *Env) (*Prompt, error) {
funcMap := template.FuncMap{
"split": func(str string) []string { return strings.Split(str, "\n") },
}
tmpl := template.New("cel").Funcs(funcMap)
tmpl, err := tmpl.Parse(authoringPrompt)
if err != nil {
return nil, err
}
return &Prompt{
Persona: defaultPersona,
FormatRules: defaultFormatRules,
GeneralUsage: defaultGeneralUsage,
tmpl: tmpl,
env: env,
}, nil
}

// Prompt represents the core components of an LLM prompt based on a CEL environment.
//
// All fields of the prompt may be overwritten / modified with support for rendering the
// prompt to a human-readable string.
type Prompt struct {
// Persona indicates something about the kind of user making the request
Persona string

// FormatRules indicate how the LLM should generate its output
FormatRules string

// GeneralUsage specifies additional context on how CEL should be used.
GeneralUsage string

// tmpl is the text template base-configuration for rendering text.
tmpl *template.Template

// env reference used to collect variables, functions, and macros available to the prompt.
env *Env
}

type promptInst struct {
*Prompt

Variables []*common.Doc
Macros []*common.Doc
Functions []*common.Doc
UserPrompt string
}

// Render renders the user prompt with the associated context from the prompt template
// for use with LLM generators.
func (p *Prompt) Render(userPrompt string) string {
var buffer strings.Builder
vars := make([]*common.Doc, len(p.env.Variables()))
for i, v := range p.env.Variables() {
vars[i] = v.Documentation()
}
sort.SliceStable(vars, func(i, j int) bool {
return vars[i].Name < vars[j].Name
})
macs := make([]*common.Doc, len(p.env.Macros()))
for i, m := range p.env.Macros() {
macs[i] = m.(common.Documentor).Documentation()
}
funcs := make([]*common.Doc, 0, len(p.env.Functions()))
for _, f := range p.env.Functions() {
if _, hidden := hiddenFunctions[f.Name()]; hidden {
continue
}
funcs = append(funcs, f.Documentation())
}
sort.SliceStable(funcs, func(i, j int) bool {
return funcs[i].Name < funcs[j].Name
})
inst := &promptInst{
Prompt: p,
Variables: vars,
Macros: macs,
Functions: funcs,
UserPrompt: userPrompt}
p.tmpl.Execute(&buffer, inst)
return buffer.String()
}

const (
defaultPersona = `You are a software engineer with expertise in networking and application security
authoring boolean Common Expression Language (CEL) expressions to ensure firewall,
networking, authentication, and data access is only permitted when all conditions
are satisfied.`

defaultFormatRules = `Output your response as a CEL expression.

Write the expression with the comment on the first line and the expression on the
subsequent lines. Format the expression using 80-character line limits commonly
found in C++ or Java code.`

defaultGeneralUsage = `CEL supports Protocol Buffer and JSON types, as well as simple types and aggregate types.

Simple types include bool, bytes, double, int, string, and uint:

* double literals must always include a decimal point: 1.0, 3.5, -2.2
* uint literals must be positive values suffixed with a 'u': 42u
* byte literals are strings prefixed with a 'b': b'1235'
* string literals can use either single quotes or double quotes: 'hello', "world"
* string literals can also be treated as raw strings that do not require any
escaping within the string by using the 'R' prefix: R"""quote: "hi" """

Aggregate types include list and map:

* list literals consist of zero or more values between brackets: "['a', 'b', 'c']"
* map literal consist of colon-separated key-value pairs within braces: "{'key1': 1, 'key2': 2}"
* Only int, uint, string, and bool types are valid map keys.
* Maps containing HTTP headers must always use lower-cased string keys.

Comments start with two-forward slashes followed by text and a newline.`
)

var (
hiddenFunctions = map[string]bool{
overloads.DeprecatedIn: true,
operators.OldIn: true,
operators.OldNotStrictlyFalse: true,
operators.NotStrictlyFalse: true,
}
)
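The new prompt.go file above adds an AuthoringPrompt helper that turns a CEL environment into an LLM prompt via the embedded authoring template. A minimal usage sketch, assuming an illustrative variable and user prompt that are not part of this diff:

package celdemo

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func renderAuthoringPrompt() error {
	env, err := cel.NewEnv(cel.Variable("request.path", cel.StringType))
	if err != nil {
		return err
	}
	prompt, err := cel.AuthoringPrompt(env)
	if err != nil {
		return err
	}
	// Render folds the environment's variables, macros, and functions into the
	// embedded authoring.tmpl template alongside the user's request.
	fmt.Println(prompt.Render("only allow requests under /healthz"))
	return nil
}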
56 vendor/github.com/google/cel-go/cel/templates/authoring.tmpl generated vendored Normal file
@@ -0,0 +1,56 @@
{{define "variable"}}{{.Name}} is a {{.Type}}
{{- end -}}

{{define "macro" -}}
{{.Name}} macro{{if .Description}} - {{range split .Description}}{{.}} {{end}}
{{end}}
{{range .Children}}{{range split .Description}} {{.}}
{{end}}
{{- end -}}
{{- end -}}

{{define "overload" -}}
{{if .Children}}{{range .Children}}{{range split .Description}} {{.}}
{{end}}
{{- end -}}
{{else}} {{.Signature}}
{{end}}
{{- end -}}

{{define "function" -}}
{{.Name}}{{if .Description}} - {{range split .Description}}{{.}} {{end}}
{{end}}
{{range .Children}}{{template "overload" .}}{{end}}
{{- end -}}

{{.Persona}}

{{.FormatRules}}

{{if or .Variables .Macros .Functions -}}
Only use the following variables, macros, and functions in expressions.
{{if .Variables}}
Variables:

{{range .Variables}}* {{template "variable" .}}
{{end -}}

{{end -}}
{{if .Macros}}
Macros:

{{range .Macros}}* {{template "macro" .}}
{{end -}}

{{end -}}
{{if .Functions}}
Functions:

{{range .Functions}}* {{template "function" .}}
{{end -}}

{{end -}}
{{- end -}}
{{.GeneralUsage}}

{{.UserPrompt}}
70 vendor/github.com/google/cel-go/cel/validator.go generated vendored
@@ -20,11 +20,16 @@ import (
"regexp"

"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/env"
"github.com/google/cel-go/common/overloads"
)

const (
homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
durationValidatorName = "cel.validator.duration"
regexValidatorName = "cel.validator.matches"
timestampValidatorName = "cel.validator.timestamp"
homogeneousValidatorName = "cel.validator.homogeneous_literals"
nestingLimitValidatorName = "cel.validator.comprehension_nesting_limit"

// HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
// the set of function names which are exempt from homogeneous type checks. The expected type
@@ -36,6 +41,35 @@ const (
HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
)

var (
astValidatorFactories = map[string]ASTValidatorFactory{
nestingLimitValidatorName: func(val *env.Validator) (ASTValidator, error) {
if limit, found := val.ConfigValue("limit"); found {
if val, isInt := limit.(int); isInt {
return ValidateComprehensionNestingLimit(val), nil
}
return nil, fmt.Errorf("invalid validator: %s unsupported limit type: %v", nestingLimitValidatorName, limit)
}
return nil, fmt.Errorf("invalid validator: %s missing limit", nestingLimitValidatorName)
},
durationValidatorName: func(*env.Validator) (ASTValidator, error) {
return ValidateDurationLiterals(), nil
},
regexValidatorName: func(*env.Validator) (ASTValidator, error) {
return ValidateRegexLiterals(), nil
},
timestampValidatorName: func(*env.Validator) (ASTValidator, error) {
return ValidateTimestampLiterals(), nil
},
homogeneousValidatorName: func(*env.Validator) (ASTValidator, error) {
return ValidateHomogeneousAggregateLiterals(), nil
},
}
)

// ASTValidatorFactory creates an ASTValidator as configured by the input map
type ASTValidatorFactory func(*env.Validator) (ASTValidator, error)

// ASTValidators configures a set of ASTValidator instances into the target environment.
//
// Validators are applied in the order in which the are specified and are treated as singletons.
@@ -70,6 +104,18 @@ type ASTValidator interface {
Validate(*Env, ValidatorConfig, *ast.AST, *Issues)
}

// ConfigurableASTValidator supports conversion of an object to an `env.Validator` instance used for
// YAML serialization.
type ConfigurableASTValidator interface {
// ToConfig converts the internal configuration of an ASTValidator into an env.Validator instance
// which minimally must include the validator name, but may also include a map[string]any config
// object to be serialized to YAML. The string keys represent the configuration parameter name,
// and the any value must mirror the internally supported type associated with the config key.
//
// Note: only primitive CEL types are supported by CEL validators at this time.
ToConfig() *env.Validator
}

// ValidatorConfig provides an accessor method for querying validator configuration state.
type ValidatorConfig interface {
GetOrDefault(name string, value any) any
@@ -196,7 +242,12 @@ type formatValidator struct {

// Name returns the unique name of this function format validator.
func (v formatValidator) Name() string {
return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
return fmt.Sprintf("cel.validator.%s", v.funcName)
}

// ToConfig converts the ASTValidator to an env.Validator specifying the validator name.
func (v formatValidator) ToConfig() *env.Validator {
return env.NewValidator(v.Name())
}

// Validate searches the AST for uses of a given function name with a constant argument and performs a check
@@ -242,6 +293,11 @@ func (homogeneousAggregateLiteralValidator) Name() string {
return homogeneousValidatorName
}

// ToConfig converts the ASTValidator to an env.Validator specifying the validator name.
func (v homogeneousAggregateLiteralValidator) ToConfig() *env.Validator {
return env.NewValidator(v.Name())
}

// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
//
// This validator makes an exception for list and map literals which occur at any level of nesting within
@@ -336,10 +392,18 @@ type nestingLimitValidator struct {
limit int
}

// Name returns the name of the nesting limit validator.
func (v nestingLimitValidator) Name() string {
return "cel.lib.std.validate.comprehension_nesting_limit"
return nestingLimitValidatorName
}

// ToConfig converts the ASTValidator to an env.Validator specifying the validator name and the nesting limit
// as an integer value: {"limit": int}
func (v nestingLimitValidator) ToConfig() *env.Validator {
return env.NewValidator(v.Name()).SetConfig(map[string]any{"limit": v.limit})
}

// Validate implements the ASTValidator interface method.
func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
root := ast.NavigateAST(a)
comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
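The validator.go hunks above rename the built-in validators under the cel.validator.* prefix and give each one a ToConfig method so it can round-trip through env.Validator configuration. A minimal sketch of enabling these validators through the public API; the variable and the nesting limit of 2 are illustrative:

package celdemo

import (
	"github.com/google/cel-go/cel"
)

func newValidatedEnv() (*cel.Env, error) {
	return cel.NewEnv(
		cel.Variable("labels", cel.MapType(cel.StringType, cel.StringType)),
		cel.ASTValidators(
			cel.ValidateDurationLiterals(),
			cel.ValidateTimestampLiterals(),
			cel.ValidateRegexLiterals(),
			// Serialized by ToConfig as {"limit": 2} under the
			// cel.validator.comprehension_nesting_limit name shown above.
			cel.ValidateComprehensionNestingLimit(2),
		),
	)
}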
11 vendor/github.com/google/cel-go/checker/checker.go generated vendored
@@ -145,6 +145,17 @@ func (c *checker) checkSelect(e ast.Expr) {
func (c *checker) checkOptSelect(e ast.Expr) {
// Collect metadata related to the opt select call packaged by the parser.
call := e.AsCall()
if len(call.Args()) != 2 || call.IsMemberFunction() {
t := ""
if call.IsMemberFunction() {
t = " member call with"
}
c.errors.notAnOptionalFieldSelectionCall(e.ID(), c.location(e),
fmt.Sprintf(
"incorrect signature.%s argument count: %d", t, len(call.Args())))
return
}

operand := call.Args()[0]
field := call.Args()[1]
fieldName, isString := maybeUnwrapString(field)
Some files were not shown because too many files have changed in this diff.