Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 13:29:11 +01:00

Commit: bump to k8s 1.28 beta.0
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go (68 additions; generated, vendored, new file)
@@ -0,0 +1,68 @@
/*
Package antlr implements the Go version of the ANTLR 4 runtime.

# The ANTLR Tool

ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
(or visitor) that makes it easy to respond to the recognition of phrases of interest.

# Code Generation

ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
runtime library, written specifically to support the generated code in the target language. This library is the
runtime for the Go target.

To generate code for the go target, it is generally recommended to place the source grammar files in a package of
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
your IDE, or configuration in your CI system.

Here is a general template for an ANTLR based recognizer in Go:

	.
	├── myproject
	├── parser
	│     ├── mygrammar.g4
	│     ├── antlr-4.12.0-complete.jar
	│     ├── error_listeners.go
	│     ├── generate.go
	│     ├── generate.sh
	├── go.mod
	├── go.sum
	├── main.go
	└── main_test.go

Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
The generate.go file then looks like this:

	package parser

	//go:generate ./generate.sh

And the generate.sh file will look similar to this:

	#!/bin/sh

	alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
	antlr4 -Dlanguage=Go -no-visitor -package parser *.g4

depending on whether you want visitors or listeners or any other ANTLR options.

From the command line at the root of your package “myproject” you can then simply issue the command:

	go generate ./...

# Copyright Notice

Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.

Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.

[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
*/
package antlr
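To make the template above concrete, here is a minimal, purely illustrative sketch of a main.go that drives a generated recognizer. The module path "myproject/parser", the constructor names NewMyGrammarLexer/NewMyGrammarParser, and the start rule Expr are hypothetical; they depend entirely on the grammar and are not part of this commit. Only the antlr runtime calls are real API.

	package main

	import (
		"fmt"

		"github.com/antlr/antlr4/runtime/Go/antlr/v4"

		"myproject/parser" // hypothetical package produced by generate.sh above
	)

	func main() {
		// Lex and parse a small input with the generated recognizer.
		input := antlr.NewInputStream("1 + 2")
		lexer := parser.NewMyGrammarLexer(input) // hypothetical, generated from mygrammar.g4
		tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
		p := parser.NewMyGrammarParser(tokens) // hypothetical generated parser
		tree := p.Expr()                       // hypothetical start rule
		fmt.Println(tree.ToStringTree(nil, p))
	}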
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -6,11 +6,24 @@ package antlr

import "sync"

// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int

// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term
// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]”
// in existence.
//
// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
//
// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
// DecisionToState is the decision points for all rules, subrules, optional
// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
// blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
// can go back later and build DFA predictors for them. This includes
// all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState

// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -45,6 +58,8 @@ type ATN struct {
edgeMu sync.RWMutex
}

// NewATN returns a new ATN struct representing the given grammarType and is used
// for runtime deserialization of ATNs from the code generated by the ANTLR tool
func NewATN(grammarType int, maxTokenType int) *ATN {
return &ATN{
grammarType: grammarType,
@@ -53,7 +68,7 @@ func NewATN(grammarType int, maxTokenType int) *ATN {
}
}

// NextTokensInContext computes the set of valid tokens that can occur starting
// NextTokensInContext computes and returns the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
@@ -61,8 +76,8 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
return NewLL1Analyzer(a).Look(s, nil, ctx)
}

// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
a.mu.Lock()
@@ -76,6 +91,8 @@ func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
return iset
}

// NextTokens computes and returns the set of valid tokens starting in state s, by
// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
if ctx == nil {
return a.NextTokensNoContext(s)
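The two entry points documented above differ only in whether the follow set of the rule surrounding s is included. A minimal in-package sketch of the choice a caller makes (this simply mirrors what NextTokens itself does in the hunk above; the helper name is illustrative):

	// expectedTokens asks the ATN which tokens are valid starting in state s.
	// With a nil ctx the result is cached and restricted to the current rule.
	func expectedTokens(a *ATN, s ATNState, ctx RuleContext) *IntervalSet {
		if ctx == nil {
			return a.NextTokensNoContext(s)
		}
		return a.NextTokensInContext(s, ctx)
	}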
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -8,19 +8,14 @@ import (
"fmt"
)

type comparable interface {
equals(other interface{}) bool
}

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
comparable

hash() int
Equals(o Collectable[ATNConfig]) bool
Hash() int

GetState() ATNState
GetAlt() int
@@ -47,7 +42,7 @@ type BaseATNConfig struct {
reachesIntoOuterContext int
}

func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
return &BaseATNConfig{
state: old.state,
alt: old.alt,
@@ -135,11 +130,16 @@ func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
b.reachesIntoOuterContext = v
}

// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
if b == o {
return true
} else if o == nil {
return false
}

var other, ok = o.(*BaseATNConfig)
@@ -153,30 +153,32 @@ func (b *BaseATNConfig) equals(o interface{}) bool {
if b.context == nil {
equal = other.context == nil
} else {
equal = b.context.equals(other.context)
equal = b.context.Equals(other.context)
}

var (
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
alts = b.alt == other.alt
cons = b.semanticContext.equals(other.semanticContext)
cons = b.semanticContext.Equals(other.semanticContext)
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
)

return nums && alts && cons && sups && equal
}

func (b *BaseATNConfig) hash() int {
// Hash is the default hash function for BaseATNConfig, when no specialist hash function
// is required for a collection
func (b *BaseATNConfig) Hash() int {
var c int
if b.context != nil {
c = b.context.hash()
c = b.context.Hash()
}

h := murmurInit(7)
h = murmurUpdate(h, b.state.GetStateNumber())
h = murmurUpdate(h, b.alt)
h = murmurUpdate(h, c)
h = murmurUpdate(h, b.semanticContext.hash())
h = murmurUpdate(h, b.semanticContext.Hash())
return murmurFinish(h, 4)
}

@@ -243,7 +245,9 @@ func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *Lex
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func (l *LexerATNConfig) hash() int {
// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Hash() int {
var f int
if l.passedThroughNonGreedyDecision {
f = 1
@@ -253,15 +257,20 @@ func (l *LexerATNConfig) hash() int {
h := murmurInit(7)
h = murmurUpdate(h, l.state.GetStateNumber())
h = murmurUpdate(h, l.alt)
h = murmurUpdate(h, l.context.hash())
h = murmurUpdate(h, l.semanticContext.hash())
h = murmurUpdate(h, l.context.Hash())
h = murmurUpdate(h, l.semanticContext.Hash())
h = murmurUpdate(h, f)
h = murmurUpdate(h, l.lexerActionExecutor.hash())
h = murmurUpdate(h, l.lexerActionExecutor.Hash())
h = murmurFinish(h, 6)
return h
}

func (l *LexerATNConfig) equals(other interface{}) bool {
// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
if l == other {
return true
}
var othert, ok = other.(*LexerATNConfig)

if l == other {
@@ -275,7 +284,7 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
var b bool

if l.lexerActionExecutor != nil {
b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
@@ -284,10 +293,9 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
return false
}

return l.BaseATNConfig.equals(othert.BaseATNConfig)
return l.BaseATNConfig.Equals(othert.BaseATNConfig)
}

func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
var ds, ok = target.(DecisionState)
@@ -1,24 +1,25 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ATNConfigSet interface {
|
||||
hash() int
|
||||
Hash() int
|
||||
Equals(o Collectable[ATNConfig]) bool
|
||||
Add(ATNConfig, *DoubleDict) bool
|
||||
AddAll([]ATNConfig) bool
|
||||
|
||||
GetStates() Set
|
||||
GetStates() *JStore[ATNState, Comparator[ATNState]]
|
||||
GetPredicates() []SemanticContext
|
||||
GetItems() []ATNConfig
|
||||
|
||||
OptimizeConfigs(interpreter *BaseATNSimulator)
|
||||
|
||||
Equals(other interface{}) bool
|
||||
|
||||
Length() int
|
||||
IsEmpty() bool
|
||||
Contains(ATNConfig) bool
|
||||
@@ -57,7 +58,7 @@ type BaseATNConfigSet struct {
|
||||
// effectively doubles the number of objects associated with ATNConfigs. All
|
||||
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
|
||||
// read-only because a set becomes a DFA state.
|
||||
configLookup Set
|
||||
configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
|
||||
|
||||
// configs is the added elements.
|
||||
configs []ATNConfig
|
||||
@@ -83,7 +84,7 @@ type BaseATNConfigSet struct {
|
||||
|
||||
// readOnly is whether it is read-only. Do not
|
||||
// allow any code to manipulate the set if true because DFA states will point at
|
||||
// sets and those must not change. It not protect other fields; conflictingAlts
|
||||
// sets and those must not change. It not, protect other fields; conflictingAlts
|
||||
// in particular, which is assigned after readOnly.
|
||||
readOnly bool
|
||||
|
||||
@@ -104,7 +105,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet {
|
||||
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
|
||||
return &BaseATNConfigSet{
|
||||
cachedHash: -1,
|
||||
configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
|
||||
configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
|
||||
fullCtx: fullCtx,
|
||||
}
|
||||
}
|
||||
@@ -126,9 +127,11 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
||||
b.dipsIntoOuterContext = true
|
||||
}
|
||||
|
||||
existing := b.configLookup.Add(config).(ATNConfig)
|
||||
existing, present := b.configLookup.Put(config)
|
||||
|
||||
if existing == config {
|
||||
// The config was not already in the set
|
||||
//
|
||||
if !present {
|
||||
b.cachedHash = -1
|
||||
b.configs = append(b.configs, config) // Track order here
|
||||
return true
|
||||
@@ -154,11 +157,14 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetStates() Set {
|
||||
states := newArray2DHashSet(nil, nil)
|
||||
func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
|
||||
|
||||
// states uses the standard comparator provided by the ATNState instance
|
||||
//
|
||||
states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
states.Add(b.configs[i].GetState())
|
||||
states.Put(b.configs[i].GetState())
|
||||
}
|
||||
|
||||
return states
|
||||
@@ -214,7 +220,34 @@ func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
|
||||
// Compare is a hack function just to verify that adding DFAstares to the known
|
||||
// set works, so long as comparison of ATNConfigSet s works. For that to work, we
|
||||
// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
|
||||
// know the order, so we do this inefficient hack. If this proves the point, then
|
||||
// we can change the config set to a better structure.
|
||||
func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
|
||||
if len(b.configs) != len(bs.configs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range b.configs {
|
||||
found := false
|
||||
for _, c2 := range bs.configs {
|
||||
if c.Equals(c2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
|
||||
if b == other {
|
||||
return true
|
||||
} else if _, ok := other.(*BaseATNConfigSet); !ok {
|
||||
@@ -224,15 +257,15 @@ func (b *BaseATNConfigSet) Equals(other interface{}) bool {
|
||||
other2 := other.(*BaseATNConfigSet)
|
||||
|
||||
return b.configs != nil &&
|
||||
// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
|
||||
b.fullCtx == other2.fullCtx &&
|
||||
b.uniqueAlt == other2.uniqueAlt &&
|
||||
b.conflictingAlts == other2.conflictingAlts &&
|
||||
b.hasSemanticContext == other2.hasSemanticContext &&
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
|
||||
b.Compare(other2)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) hash() int {
|
||||
func (b *BaseATNConfigSet) Hash() int {
|
||||
if b.readOnly {
|
||||
if b.cachedHash == -1 {
|
||||
b.cachedHash = b.hashCodeConfigs()
|
||||
@@ -247,7 +280,7 @@ func (b *BaseATNConfigSet) hash() int {
|
||||
func (b *BaseATNConfigSet) hashCodeConfigs() int {
|
||||
h := 1
|
||||
for _, config := range b.configs {
|
||||
h = 31*h + config.hash()
|
||||
h = 31*h + config.Hash()
|
||||
}
|
||||
return h
|
||||
}
|
||||
@@ -283,7 +316,7 @@ func (b *BaseATNConfigSet) Clear() {
|
||||
|
||||
b.configs = make([]ATNConfig, 0)
|
||||
b.cachedHash = -1
|
||||
b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) FullContext() bool {
|
||||
@@ -365,7 +398,8 @@ type OrderedATNConfigSet struct {
|
||||
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
|
||||
b := NewBaseATNConfigSet(false)
|
||||
|
||||
b.configLookup = newArray2DHashSet(nil, nil)
|
||||
// This set uses the standard Hash() and Equals() from ATNConfig
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
|
||||
return &OrderedATNConfigSet{BaseATNConfigSet: b}
|
||||
}
|
||||
@@ -375,7 +409,7 @@ func hashATNConfig(i interface{}) int {
|
||||
hash := 7
|
||||
hash = 31*hash + o.GetState().GetStateNumber()
|
||||
hash = 31*hash + o.GetAlt()
|
||||
hash = 31*hash + o.GetSemanticContext().hash()
|
||||
hash = 31*hash + o.GetSemanticContext().Hash()
|
||||
return hash
|
||||
}
|
||||
|
||||
@@ -403,5 +437,5 @@ func equalATNConfigs(a, b interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return ai.GetSemanticContext().equals(bi.GetSemanticContext())
|
||||
return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
|
||||
}
|
||||
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -49,7 +49,8 @@ type ATNState interface {
AddTransition(Transition, int)

String() string
hash() int
Hash() int
Equals(Collectable[ATNState]) bool
}

type BaseATNState struct {
@@ -123,7 +124,7 @@ func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
as.NextTokenWithinRule = v
}

func (as *BaseATNState) hash() int {
func (as *BaseATNState) Hash() int {
return as.stateNumber
}

@@ -131,7 +132,7 @@ func (as *BaseATNState) String() string {
return strconv.Itoa(as.stateNumber)
}

func (as *BaseATNState) equals(other interface{}) bool {
func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
if ot, ok := other.(ATNState); ok {
return as.stateNumber == ot.GetStateNumber()
}
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -331,10 +331,12 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string

func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
c.Fill()

if interval == nil {
c.Fill()
interval = NewInterval(0, len(c.tokens)-1)
} else {
c.Sync(interval.Stop)
}

start := interval.Start
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go (147 additions; generated, vendored, new file)
@@ -0,0 +1,147 @@
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

// This file contains all the implementations of custom comparators used for generic collections when the
// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
// put the comparators in the source file for the struct themselves, but given the organization of this code is
// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by
// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection.
// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.

// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}

var (
aStateEqInst = &ObjEqComparator[ATNState]{}
aConfEqInst = &ObjEqComparator[ATNConfig]{}
aConfCompInst = &ATNConfigComparator[ATNConfig]{}
atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
)

// Equals2 delegates to the Equals() method of type T
func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
return o1.Equals(o2)
}

// Hash1 delegates to the Hash() method of type T
func (c *ObjEqComparator[T]) Hash1(o T) int {

return o.Hash()
}

type SemCComparator[T Collectable[T]] struct{}

// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {

// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true

}

// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}

return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetAlt() == o2.GetAlt() &&
o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
hash = 31*hash + o.GetSemanticContext().Hash()
return hash
}

// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
type ATNAltConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {

// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true

}

// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}

return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetContext().Equals(o2.GetContext())
}

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}

// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {

// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true

}

// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}

return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetAlt() == o2.GetAlt() &&
o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {

return o.Hash()
}
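To make the ObjEqComparator delegation concrete, here is a hypothetical, in-package sketch (the point type and helper are illustrative only and are not part of the runtime): any type that provides the Hash()/Equals() pair required by Collectable can be compared through the default comparator with no further code.

	// point is a hypothetical type satisfying Collectable[*point].
	type point struct{ x, y int }

	func (p *point) Hash() int { return p.x*31 + p.y }

	func (p *point) Equals(other Collectable[*point]) bool {
		o, ok := other.(*point)
		return ok && o.x == p.x && o.y == p.y
	}

	func comparePoints() bool {
		cmp := &ObjEqComparator[*point]{}
		a, b := &point{1, 2}, &point{1, 2}
		// Equals2 and Hash1 simply delegate to the methods defined above.
		return cmp.Equals2(a, b) && cmp.Hash1(a) == cmp.Hash1(b) // true
	}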
@@ -1,13 +1,9 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
"sort"
)

type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -15,8 +11,15 @@ type DFA struct {
decision int

// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there.
states map[int]*DFAState
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
// good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
// to see if they really are the same object.
//
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]

numstates int

s0 *DFAState

@@ -29,7 +32,7 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
states: make(map[int]*DFAState),
states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
@@ -92,7 +95,8 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
d.setStates(make(map[int]*DFAState))
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0

if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
@@ -117,38 +121,12 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}

func (d *DFA) getState(hash int) (*DFAState, bool) {
s, ok := d.states[hash]
return s, ok
}

func (d *DFA) setStates(states map[int]*DFAState) {
d.states = states
}

func (d *DFA) setState(hash int, state *DFAState) {
d.states[hash] = state
}

func (d *DFA) numStates() int {
return len(d.states)
}

type dfaStateList []*DFAState

func (d dfaStateList) Len() int { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }

// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
vs := make([]*DFAState, 0, len(d.states))

for _, v := range d.states {
vs = append(vs, v)
}

sort.Sort(dfaStateList(vs))
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})

return vs
}
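The comment added to the states field above explains why a plain Go map keyed by a hash cannot stand in for Java's hashCode-plus-Equals lookup: two distinct DFAState objects with the same hash would silently collide. A minimal in-package sketch of what lookups look like once the field is a JStore (the helper name is illustrative; d.states.Get and d.states.Len are the JStore methods added later in this commit):

	// findState returns the already-stored state that is Equals()-equal to
	// candidate, if one exists; hash collisions are resolved inside the JStore.
	func findState(d *DFA, candidate *DFAState) (*DFAState, bool) {
		return d.states.Get(candidate)
	}

	// stateCount replaces the removed numStates() helper.
	func stateCount(d *DFA) int {
		return d.states.Len()
	}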
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -90,16 +90,16 @@ func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
}

// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() Set {
alts := newArray2DHashSet(nil, nil)
func (d *DFAState) GetAltSet() []int {
var alts []int

if d.configs != nil {
for _, c := range d.configs.GetItems() {
alts.Add(c.GetAlt())
alts = append(alts, c.GetAlt())
}
}

if alts.Len() == 0 {
if len(alts) == 0 {
return nil
}

@@ -130,27 +130,6 @@ func (d *DFAState) setPrediction(v int) {
d.prediction = v
}

// equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has d exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) equals(other interface{}) bool {
if d == other {
return true
} else if _, ok := other.(*DFAState); !ok {
return false
}

return d.configs.Equals(other.(*DFAState).configs)
}

func (d *DFAState) String() string {
var s string
if d.isAcceptState {
@@ -164,8 +143,27 @@ func (d *DFAState) String() string {
return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}

func (d *DFAState) hash() int {
func (d *DFAState) Hash() int {
h := murmurInit(7)
h = murmurUpdate(h, d.configs.hash())
h = murmurUpdate(h, d.configs.Hash())
return murmurFinish(h, 1)
}

// Equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has d exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
if d == o {
return true
}

return d.configs.Equals(o.(*DFAState).configs)
}
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -87,7 +87,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
return strconv.Itoa(decision) + " (" + ruleName + ")"
}

//
// Computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser.
@@ -97,7 +96,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
//
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -48,12 +48,9 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}

//
// Provides a default instance of {@link ConsoleErrorListener}.
//
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

//
// {@inheritDoc}
//
// <p>
@@ -64,7 +61,6 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
//
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
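ConsoleErrorListener, shown above, prints "line <line>:<column> <msg>" to stderr. A common application-side pattern (standard runtime usage, not something introduced by this diff) is to replace it with a listener that collects errors instead; the struct name below is illustrative, and the wiring assumes a parser p built as in the earlier sketch.

	import (
		"fmt"

		"github.com/antlr/antlr4/runtime/Go/antlr/v4"
	)

	// collectingErrorListener records syntax errors instead of writing to stderr.
	type collectingErrorListener struct {
		*antlr.DefaultErrorListener // provides no-op implementations of the other callbacks
		errs []string
	}

	func (c *collectingErrorListener) SyntaxError(_ antlr.Recognizer, _ interface{},
		line, column int, msg string, _ antlr.RecognitionException) {
		c.errs = append(c.errs, fmt.Sprintf("line %d:%d %s", line, column, msg))
	}

	// Typical wiring on a parser p:
	//   l := &collectingErrorListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	//   p.RemoveErrorListeners()
	//   p.AddErrorListener(l)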
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -23,7 +23,6 @@ type ErrorStrategy interface {

// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
//
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
@@ -61,12 +60,10 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
d.endErrorCondition(recognizer)
}

//
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
@@ -75,28 +72,23 @@ func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}

//
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
//
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}

//
// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
//
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}

//
// {@inheritDoc}
//
// <p>The default implementation returns immediately if the handler is already
@@ -114,7 +106,6 @@ func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
@@ -142,7 +133,6 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
//
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {

if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
@@ -206,7 +196,6 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off d
// functionality by simply overriding d method as a blank { }.</p>
//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.InErrorRecoveryMode(recognizer) {
@@ -247,7 +236,6 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
@@ -264,7 +252,6 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

//
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
@@ -272,14 +259,12 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

//
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
@@ -287,7 +272,6 @@ func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *Inpu
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
@@ -310,7 +294,6 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -339,7 +322,6 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -392,15 +374,14 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
// derivation:
//
// <pre>
// => ID '=' '(' INT ')' ('+' atom)* ''
// => ID '=' '(' INT ')' ('+' atom)* ”
// ^
// </pre>
//
// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -418,7 +399,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
panic(NewInputMisMatchException(recognizer))
}

//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
@@ -434,7 +414,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
//
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
@@ -469,7 +448,6 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
//
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
@@ -507,7 +485,6 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override d method to create the appropriate tokens.
//
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
@@ -546,7 +523,6 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a NewJava type.
//
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return "<no token>"
@@ -578,7 +554,7 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
// EXAMPLE
// # EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
@@ -597,7 +573,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// c : ID
// | INT
//
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
@@ -660,7 +635,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
//
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
@@ -733,7 +707,6 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
//
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
@@ -749,7 +722,6 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {

// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
//
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
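As a hedged sketch of the usual application-side wiring for the BailErrorStrategy described above: installing it makes the parser abort on the first syntax error by panicking with a ParseCancellationException that wraps the original RecognitionException. The parser.NewMyGrammarParser constructor is the same hypothetical generated name used in the earlier main.go sketch; SetErrorHandler and NewBailErrorStrategy are runtime API.

	import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

	// newStrictParser builds a parser that bails out instead of attempting recovery.
	func newStrictParser(tokens antlr.TokenStream) *parser.MyGrammarParser {
		p := parser.NewMyGrammarParser(tokens) // hypothetical generated constructor
		p.SetErrorHandler(antlr.NewBailErrorStrategy())
		return p
	}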
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -74,7 +74,6 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {

// <p>If the state number is not known, b method returns -1.</p>

//
// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time b exception was panicn.
//
@@ -136,7 +135,6 @@ type NoViableAltException struct {
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error. Reported by ReportNoViableAlternative()
//
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {

if ctx == nil {
@@ -177,7 +175,6 @@ type InputMisMatchException struct {

// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
//
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {

i := new(InputMisMatchException)
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

@@ -223,6 +223,10 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}

func (i *IntervalSet) GetIntervals() []*Interval {
return i.intervals
}

func (i *IntervalSet) toCharString() string {
names := make([]string, len(i.intervals))
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go (198 additions; generated, vendored, new file)
@@ -0,0 +1,198 @@
|
||||
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

import (
	"sort"
)

// Collectable is an interface that a struct should implement if it is to be
// usable as a key in these collections.
type Collectable[T any] interface {
	Hash() int
	Equals(other Collectable[T]) bool
}

type Comparator[T any] interface {
	Hash1(o T) int
	Equals2(T, T) bool
}

// JStore implements a container that allows the use of a struct to calculate the key
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
// serve the needs of the ANTLR Go runtime.
//
// For ease of porting the logic of the runtime from the master target (Java), this collection
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
// we understand the requirements, then this is fine - this is not a general purpose collection.
type JStore[T any, C Comparator[T]] struct {
	store      map[int][]T
	len        int
	comparator Comparator[T]
}

func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {

	if comparator == nil {
		panic("comparator cannot be nil")
	}

	s := &JStore[T, C]{
		store:      make(map[int][]T, 1),
		comparator: comparator,
	}
	return s
}

// Put will store given value in the collection. Note that the key for storage is generated from
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
// as any kind of general collection.
//
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
// tested by calling the equals() method on the key.
//
// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
//
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn

	kh := s.comparator.Hash1(value)

	for _, v1 := range s.store[kh] {
		if s.comparator.Equals2(value, v1) {
			return v1, true
		}
	}
	s.store[kh] = append(s.store[kh], value)
	s.len++
	return value, false
}
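// Illustrative sketch only, not from the upstream ANTLR sources: a minimal example of how a
// Comparator and a JStore fit together, based on the doc comments above. The intComparator
// type and the example function are assumed names for illustration.

type intComparator struct{}

func (intComparator) Hash1(o int) int       { return o % 7 } // deliberately coarse so values share buckets
func (intComparator) Equals2(a, b int) bool { return a == b }

func exampleJStorePut() {
	s := NewJStore[int, Comparator[int]](intComparator{})

	v, exists := s.Put(10) // first time this value is seen: v == 10, exists == false
	_, _ = v, exists

	v, exists = s.Put(10) // already present: the stored value is returned, exists == true
	_, _ = v, exists

	_, found := s.Get(17) // 17 shares 10's bucket (17 % 7 == 10 % 7) but Equals2 rejects it
	_ = found             // found == false
}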
// Get will return the value associated with the key - the type of the key is the same type as the value
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
// generated using the object we are going to store.
func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn

	kh := s.comparator.Hash1(key)

	for _, v := range s.store[kh] {
		if s.comparator.Equals2(key, v) {
			return v, true
		}
	}
	return key, false
}

// Contains returns true if the given key is present in the store
func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn

	_, present := s.Get(key)
	return present
}

func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
	vs := make([]T, 0, len(s.store))
	for _, v := range s.store {
		vs = append(vs, v...)
	}
	sort.Slice(vs, func(i, j int) bool {
		return less(vs[i], vs[j])
	})

	return vs
}

func (s *JStore[T, C]) Each(f func(T) bool) {
	for _, e := range s.store {
		for _, v := range e {
			f(v)
		}
	}
}

func (s *JStore[T, C]) Len() int {
	return s.len
}

func (s *JStore[T, C]) Values() []T {
	vs := make([]T, 0, len(s.store))
	for _, e := range s.store {
		for _, v := range e {
			vs = append(vs, v)
		}
	}
	return vs
}
type entry[K, V any] struct {
	key K
	val V
}

type JMap[K, V any, C Comparator[K]] struct {
	store      map[int][]*entry[K, V]
	len        int
	comparator Comparator[K]
}

func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
	return &JMap[K, V, C]{
		store:      make(map[int][]*entry[K, V], 1),
		comparator: comparator,
	}
}

func (m *JMap[K, V, C]) Put(key K, val V) {
	kh := m.comparator.Hash1(key)

	m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
	m.len++
}

func (m *JMap[K, V, C]) Values() []V {
	vs := make([]V, 0, len(m.store))
	for _, e := range m.store {
		for _, v := range e {
			vs = append(vs, v.val)
		}
	}
	return vs
}

func (m *JMap[K, V, C]) Get(key K) (V, bool) {

	var none V
	kh := m.comparator.Hash1(key)
	for _, e := range m.store[kh] {
		if m.comparator.Equals2(e.key, key) {
			return e.val, true
		}
	}
	return none, false
}

func (m *JMap[K, V, C]) Len() int {
	return len(m.store)
}

func (m *JMap[K, V, C]) Delete(key K) {
	kh := m.comparator.Hash1(key)
	for i, e := range m.store[kh] {
		if m.comparator.Equals2(e.key, key) {
			m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
			m.len--
			return
		}
	}
}

func (m *JMap[K, V, C]) Clear() {
	m.store = make(map[int][]*entry[K, V])
}
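// Illustrative sketch only, not from the upstream ANTLR sources: JMap usage mirrors JStore,
// except that the caller supplies both a key and a value and Put never deduplicates. The
// strComparator type and the example function are assumed names for illustration.

type strComparator struct{}

func (strComparator) Hash1(o string) int       { return len(o) } // coarse on purpose
func (strComparator) Equals2(a, b string) bool { return a == b }

func exampleJMap() {
	m := NewJMap[string, int, Comparator[string]](strComparator{})

	m.Put("rule", 1)
	m.Put("atn", 2) // different length, so a different bucket than "rule"

	if v, ok := m.Get("rule"); ok {
		_ = v // v == 1
	}

	m.Delete("atn") // removes the matching entry from its bucket
}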
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -232,8 +232,6 @@ func (b *BaseLexer) NextToken() Token {
}
return b.token
}

return nil
}

// Instruct the lexer to Skip creating a token for current lexer rule
@@ -342,7 +340,7 @@ func (b *BaseLexer) GetCharIndex() int {
}

// Return the text Matched so far for the current token or any text override.
//Set the complete text of l token it wipes any previous changes to the text.
// Set the complete text of l token it wipes any previous changes to the text.
func (b *BaseLexer) GetText() string {
	if b.text != "" {
		return b.text
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -21,8 +21,8 @@ type LexerAction interface {
	getActionType() int
	getIsPositionDependent() bool
	execute(lexer Lexer)
	hash() int
	equals(other LexerAction) bool
	Hash() int
	Equals(other LexerAction) bool
}

type BaseLexerAction struct {
@@ -51,15 +51,14 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
|
||||
return b.isPositionDependent
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) hash() int {
|
||||
func (b *BaseLexerAction) Hash() int {
|
||||
return b.actionType
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) equals(other LexerAction) bool {
|
||||
func (b *BaseLexerAction) Equals(other LexerAction) bool {
|
||||
return b == other
|
||||
}
|
||||
|
||||
//
|
||||
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
|
||||
//
|
||||
// <p>The {@code Skip} command does not have any parameters, so l action is
|
||||
@@ -85,7 +84,8 @@ func (l *LexerSkipAction) String() string {
|
||||
return "skip"
|
||||
}
|
||||
|
||||
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
|
||||
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
|
||||
//
|
||||
// with the assigned type.
|
||||
type LexerTypeAction struct {
|
||||
*BaseLexerAction
|
||||
@@ -104,14 +104,14 @@ func (l *LexerTypeAction) execute(lexer Lexer) {
|
||||
lexer.SetType(l.thetype)
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) hash() int {
|
||||
func (l *LexerTypeAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.thetype)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerTypeAction) equals(other LexerAction) bool {
|
||||
func (l *LexerTypeAction) Equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerTypeAction); !ok {
|
||||
@@ -148,14 +148,14 @@ func (l *LexerPushModeAction) execute(lexer Lexer) {
|
||||
lexer.PushMode(l.mode)
|
||||
}
|
||||
|
||||
func (l *LexerPushModeAction) hash() int {
|
||||
func (l *LexerPushModeAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.mode)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerPushModeAction) equals(other LexerAction) bool {
|
||||
func (l *LexerPushModeAction) Equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerPushModeAction); !ok {
|
||||
@@ -245,14 +245,14 @@ func (l *LexerModeAction) execute(lexer Lexer) {
|
||||
lexer.SetMode(l.mode)
|
||||
}
|
||||
|
||||
func (l *LexerModeAction) hash() int {
|
||||
func (l *LexerModeAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.mode)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerModeAction) equals(other LexerAction) bool {
|
||||
func (l *LexerModeAction) Equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerModeAction); !ok {
|
||||
@@ -303,7 +303,7 @@ func (l *LexerCustomAction) execute(lexer Lexer) {
|
||||
lexer.Action(nil, l.ruleIndex, l.actionIndex)
|
||||
}
|
||||
|
||||
func (l *LexerCustomAction) hash() int {
|
||||
func (l *LexerCustomAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.ruleIndex)
|
||||
@@ -311,13 +311,14 @@ func (l *LexerCustomAction) hash() int {
|
||||
return murmurFinish(h, 3)
|
||||
}
|
||||
|
||||
func (l *LexerCustomAction) equals(other LexerAction) bool {
|
||||
func (l *LexerCustomAction) Equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerCustomAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
|
||||
return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
|
||||
l.actionIndex == other.(*LexerCustomAction).actionIndex
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,14 +345,14 @@ func (l *LexerChannelAction) execute(lexer Lexer) {
|
||||
lexer.SetChannel(l.channel)
|
||||
}
|
||||
|
||||
func (l *LexerChannelAction) hash() int {
|
||||
func (l *LexerChannelAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.actionType)
|
||||
h = murmurUpdate(h, l.channel)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
func (l *LexerChannelAction) equals(other LexerAction) bool {
|
||||
func (l *LexerChannelAction) Equals(other LexerAction) bool {
|
||||
if l == other {
|
||||
return true
|
||||
} else if _, ok := other.(*LexerChannelAction); !ok {
|
||||
@@ -412,10 +413,10 @@ func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
|
||||
l.lexerAction.execute(lexer)
|
||||
}
|
||||
|
||||
func (l *LexerIndexedCustomAction) hash() int {
|
||||
func (l *LexerIndexedCustomAction) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, l.offset)
|
||||
h = murmurUpdate(h, l.lexerAction.hash())
|
||||
h = murmurUpdate(h, l.lexerAction.Hash())
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
@@ -425,6 +426,7 @@ func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
|
||||
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
|
||||
return false
|
||||
} else {
|
||||
return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
|
||||
return l.offset == other.(*LexerIndexedCustomAction).offset &&
|
||||
l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,11 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "golang.org/x/exp/slices"

// Represents an executor for a sequence of lexer actions which traversed during
// the Matching operation of a lexer rule (token).
//
@@ -12,8 +14,8 @@ package antlr
// not cause bloating of the {@link DFA} created for the lexer.</p>

type LexerActionExecutor struct {
	lexerActions []LexerAction
	cachedHash int
	lexerActions []LexerAction
	cachedHash int
}

func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
@@ -30,7 +32,7 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
	l.cachedHash = murmurInit(57)
	for _, a := range lexerActions {
		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
		l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
	}

	return l
@@ -151,14 +153,17 @@ func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex
	}
}

func (l *LexerActionExecutor) hash() int {
func (l *LexerActionExecutor) Hash() int {
	if l == nil {
		// TODO: Why is this here? l should not be nil
		return 61
	}

	// TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
	return l.cachedHash
}

func (l *LexerActionExecutor) equals(other interface{}) bool {
func (l *LexerActionExecutor) Equals(other interface{}) bool {
	if l == other {
		return true
	}
@@ -169,5 +174,13 @@ func (l *LexerActionExecutor) equals(other interface{}) bool {
	if othert == nil {
		return false
	}
	return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
	if l.cachedHash != othert.cachedHash {
		return false
	}
	if len(l.lexerActions) != len(othert.lexerActions) {
		return false
	}
	return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
		return i.Equals(j)
	})
}
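// Illustrative sketch only, not from the upstream ANTLR sources: the old Equals above compared
// the addresses of the two lexerActions slice headers, which is only true when a value is
// compared with itself; the replacement compares the slices element by element with
// slices.EqualFunc. A standalone illustration with made-up values (the function name is an
// assumption):

func exampleSliceEquality() (addrEqual, elemEqual bool) {
	a := []int{1, 2, 3}
	b := []int{1, 2, 3}

	addrEqual = &a == &b                                                      // false: distinct variables never share an address
	elemEqual = slices.EqualFunc(a, b, func(x, y int) bool { return x == y }) // true: same elements in the same order

	return addrEqual, elemEqual
}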
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -591,19 +591,24 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
	}
	hash := proposed.hash()
	dfa := l.decisionToDFA[l.mode]

	l.atn.stateMu.Lock()
	defer l.atn.stateMu.Unlock()
	existing, ok := dfa.getState(hash)
	if ok {
	existing, present := dfa.states.Get(proposed)
	if present {

		// This state was already present, so just return it.
		//
		proposed = existing
	} else {
		proposed.stateNumber = dfa.numStates()

		// We need to add the new state
		//
		proposed.stateNumber = dfa.states.Len()
		configs.SetReadOnly(true)
		proposed.configs = configs
		dfa.setState(hash, proposed)
		dfa.states.Put(proposed)
	}
	if !suppressEdge {
		dfa.setS0(proposed)
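// Illustrative sketch only, not from the upstream ANTLR sources: the hunk above swaps the old
// hash-keyed getState/setState calls for the JStore-backed dfa.states collection. Reduced to
// its essentials, the intern pattern is the one below; internDFAState is an assumed helper
// name, not a function in this file.

func internDFAState(states *JStore[*DFAState, Comparator[*DFAState]], candidate *DFAState) *DFAState {
	if existing, present := states.Get(candidate); present {
		return existing // an equal state is already in the DFA, so reuse it
	}
	candidate.stateNumber = states.Len() // number new states in insertion order
	states.Put(candidate)
	return candidate
}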
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -14,14 +14,15 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
|
||||
return la
|
||||
}
|
||||
|
||||
//* Special value added to the lookahead sets to indicate that we hit
|
||||
// a predicate during analysis if {@code seeThruPreds==false}.
|
||||
///
|
||||
// - Special value added to the lookahead sets to indicate that we hit
|
||||
// a predicate during analysis if {@code seeThruPreds==false}.
|
||||
//
|
||||
// /
|
||||
const (
|
||||
LL1AnalyzerHitPred = TokenInvalidType
|
||||
)
|
||||
|
||||
//*
|
||||
// *
|
||||
// Calculates the SLL(1) expected lookahead set for each outgoing transition
|
||||
// of an {@link ATNState}. The returned array has one element for each
|
||||
// outgoing transition in {@code s}. If the closure from transition
|
||||
@@ -38,7 +39,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
|
||||
look := make([]*IntervalSet, count)
|
||||
for alt := 0; alt < count; alt++ {
|
||||
look[alt] = NewIntervalSet()
|
||||
lookBusy := newArray2DHashSet(nil, nil)
|
||||
lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
seeThruPreds := false // fail to get lookahead upon pred
|
||||
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
|
||||
// Wipe out lookahead for la alternative if we found nothing
|
||||
@@ -50,7 +51,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
|
||||
return look
|
||||
}
|
||||
|
||||
//*
|
||||
// *
|
||||
// Compute set of tokens that can follow {@code s} in the ATN in the
|
||||
// specified {@code ctx}.
|
||||
//
|
||||
@@ -67,7 +68,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
|
||||
//
|
||||
// @return The set of tokens that can follow {@code s} in the ATN in the
|
||||
// specified {@code ctx}.
|
||||
///
|
||||
// /
|
||||
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
|
||||
r := NewIntervalSet()
|
||||
seeThruPreds := true // ignore preds get all lookahead
|
||||
@@ -75,7 +76,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
|
||||
if ctx != nil {
|
||||
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
|
||||
}
|
||||
la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
|
||||
la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -109,14 +110,14 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
|
||||
// outermost context is reached. This parameter has no effect if {@code ctx}
|
||||
// is {@code nil}.
|
||||
|
||||
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
|
||||
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
|
||||
|
||||
returnState := la.atn.states[ctx.getReturnState(i)]
|
||||
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
|
||||
|
||||
}
|
||||
|
||||
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
|
||||
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
|
||||
|
||||
c := NewBaseATNConfig6(s, 0, ctx)
|
||||
|
||||
@@ -124,8 +125,11 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
|
||||
return
|
||||
}
|
||||
|
||||
lookBusy.Add(c)
|
||||
_, present := lookBusy.Put(c)
|
||||
if present {
|
||||
return
|
||||
|
||||
}
|
||||
if s == stopState {
|
||||
if ctx == nil {
|
||||
look.addOne(TokenEpsilon)
|
||||
@@ -198,7 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
|
||||
}
|
||||
}
|
||||
|
||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
|
||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
|
||||
|
||||
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
|
||||
|
||||
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -91,7 +91,6 @@ func NewBaseParser(input TokenStream) *BaseParser {
|
||||
// bypass alternatives.
|
||||
//
|
||||
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
|
||||
//
|
||||
var bypassAltsAtnCache = make(map[string]int)
|
||||
|
||||
// reset the parser's state//
|
||||
@@ -230,7 +229,6 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
|
||||
// @param listener the listener to add
|
||||
//
|
||||
// @panics nilPointerException if {@code} listener is {@code nil}
|
||||
//
|
||||
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
|
||||
if listener == nil {
|
||||
panic("listener")
|
||||
@@ -241,13 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
|
||||
p.parseListeners = append(p.parseListeners, listener)
|
||||
}
|
||||
|
||||
//
|
||||
// Remove {@code listener} from the list of parse listeners.
|
||||
//
|
||||
// <p>If {@code listener} is {@code nil} or has not been added as a parse
|
||||
// listener, p.method does nothing.</p>
|
||||
// @param listener the listener to remove
|
||||
//
|
||||
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
|
||||
|
||||
if p.parseListeners != nil {
|
||||
@@ -289,11 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Notify any parse listeners of an exit rule event.
|
||||
//
|
||||
// @see //addParseListener
|
||||
//
|
||||
func (p *BaseParser) TriggerExitRuleEvent() {
|
||||
if p.parseListeners != nil {
|
||||
// reverse order walk of listeners
|
||||
@@ -330,7 +324,6 @@ func (p *BaseParser) setTokenFactory(factory TokenFactory) {
|
||||
//
|
||||
// @panics UnsupportedOperationException if the current parser does not
|
||||
// implement the {@link //getSerializedATN()} method.
|
||||
//
|
||||
func (p *BaseParser) GetATNWithBypassAlts() {
|
||||
|
||||
// TODO
|
||||
@@ -402,7 +395,6 @@ func (p *BaseParser) SetTokenStream(input TokenStream) {
|
||||
|
||||
// Match needs to return the current input symbol, which gets put
|
||||
// into the label for the associated token ref e.g., x=ID.
|
||||
//
|
||||
func (p *BaseParser) GetCurrentToken() Token {
|
||||
return p.input.LT(1)
|
||||
}
|
||||
@@ -624,7 +616,6 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
|
||||
// respectively.
|
||||
//
|
||||
// @see ATN//getExpectedTokens(int, RuleContext)
|
||||
//
|
||||
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
|
||||
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
|
||||
}
|
||||
@@ -686,7 +677,7 @@ func (p *BaseParser) GetDFAStrings() string {
|
||||
func (p *BaseParser) DumpDFA() {
|
||||
seenOne := false
|
||||
for _, dfa := range p.Interpreter.decisionToDFA {
|
||||
if dfa.numStates() > 0 {
|
||||
if dfa.states.Len() > 0 {
|
||||
if seenOne {
|
||||
fmt.Println()
|
||||
}
|
||||
@@ -703,7 +694,6 @@ func (p *BaseParser) GetSourceName() string {
|
||||
|
||||
// During a parse is sometimes useful to listen in on the rule entry and exit
|
||||
// events as well as token Matches. p.is for quick and dirty debugging.
|
||||
//
|
||||
func (p *BaseParser) SetTrace(trace *TraceListener) {
|
||||
if trace == nil {
|
||||
p.RemoveParseListener(p.tracer)
|
||||
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -11,11 +11,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ParserATNSimulatorDebug = false
|
||||
ParserATNSimulatorListATNDecisions = false
|
||||
ParserATNSimulatorDFADebug = false
|
||||
ParserATNSimulatorRetryDebug = false
|
||||
TurnOffLRLoopEntryBranchOpt = false
|
||||
ParserATNSimulatorDebug = false
|
||||
ParserATNSimulatorTraceATNSim = false
|
||||
ParserATNSimulatorDFADebug = false
|
||||
ParserATNSimulatorRetryDebug = false
|
||||
TurnOffLRLoopEntryBranchOpt = false
|
||||
)
|
||||
|
||||
type ParserATNSimulator struct {
|
||||
@@ -70,8 +70,8 @@ func (p *ParserATNSimulator) reset() {
|
||||
}
|
||||
|
||||
func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
|
||||
fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
|
||||
" exec LA(1)==" + p.getLookaheadName(input) +
|
||||
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
|
||||
strconv.Itoa(input.LT(1).GetColumn()))
|
||||
@@ -111,15 +111,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
|
||||
|
||||
if s0 == nil {
|
||||
if outerContext == nil {
|
||||
outerContext = RuleContextEmpty
|
||||
outerContext = ParserRuleContextEmpty
|
||||
}
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
|
||||
if ParserATNSimulatorDebug {
|
||||
fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
|
||||
" exec LA(1)==" + p.getLookaheadName(input) +
|
||||
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
|
||||
}
|
||||
fullCtx := false
|
||||
s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
|
||||
s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
|
||||
|
||||
p.atn.stateMu.Lock()
|
||||
if dfa.getPrecedenceDfa() {
|
||||
@@ -174,17 +174,18 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
|
||||
// Reporting insufficient predicates
|
||||
|
||||
// cover these cases:
|
||||
// dead end
|
||||
// single alt
|
||||
// single alt + preds
|
||||
// conflict
|
||||
// conflict + preds
|
||||
//
|
||||
// dead end
|
||||
// single alt
|
||||
// single alt + preds
|
||||
// conflict
|
||||
// conflict + preds
|
||||
func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
|
||||
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
|
||||
" exec LA(1)==" + p.getLookaheadName(input) +
|
||||
", DFA state " + s0.String() +
|
||||
", LA(1)==" + p.getLookaheadName(input) +
|
||||
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
|
||||
}
|
||||
|
||||
@@ -277,8 +278,6 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
|
||||
t = input.LA(1)
|
||||
}
|
||||
}
|
||||
|
||||
panic("Should not have reached p state")
|
||||
}
|
||||
|
||||
// Get an existing target state for an edge in the DFA. If the target state
|
||||
@@ -384,7 +383,7 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState
|
||||
// comes back with reach.uniqueAlt set to a valid alt
|
||||
func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
|
||||
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("execATNWithFullContext " + s0.String())
|
||||
}
|
||||
|
||||
@@ -492,9 +491,6 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT
|
||||
}
|
||||
|
||||
func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
|
||||
if ParserATNSimulatorDebug {
|
||||
fmt.Println("in computeReachSet, starting closure: " + closure.String())
|
||||
}
|
||||
if p.mergeCache == nil {
|
||||
p.mergeCache = NewDoubleDict()
|
||||
}
|
||||
@@ -570,7 +566,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
|
||||
//
|
||||
if reach == nil {
|
||||
reach = NewBaseATNConfigSet(fullCtx)
|
||||
closureBusy := newArray2DHashSet(nil, nil)
|
||||
closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
treatEOFAsEpsilon := t == TokenEOF
|
||||
amount := len(intermediate.configs)
|
||||
for k := 0; k < amount; k++ {
|
||||
@@ -610,6 +606,11 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
|
||||
reach.Add(skippedStopStates[l], p.mergeCache)
|
||||
}
|
||||
}
|
||||
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
|
||||
}
|
||||
|
||||
if len(reach.GetItems()) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -617,7 +618,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
|
||||
return reach
|
||||
}
|
||||
|
||||
//
|
||||
// Return a configuration set containing only the configurations from
|
||||
// {@code configs} which are in a {@link RuleStopState}. If all
|
||||
// configurations in {@code configs} are already in a rule stop state, p
|
||||
@@ -636,7 +636,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
|
||||
// @return {@code configs} if all configurations in {@code configs} are in a
|
||||
// rule stop state, otherwise return a Newconfiguration set containing only
|
||||
// the configurations from {@code configs} which are in a rule stop state
|
||||
//
|
||||
func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
|
||||
if PredictionModeallConfigsInRuleStopStates(configs) {
|
||||
return configs
|
||||
@@ -662,16 +661,20 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
|
||||
// always at least the implicit call to start rule
|
||||
initialContext := predictionContextFromRuleContext(p.atn, ctx)
|
||||
configs := NewBaseATNConfigSet(fullCtx)
|
||||
if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("computeStartState from ATN state " + a.String() +
|
||||
" initialContext=" + initialContext.String())
|
||||
}
|
||||
|
||||
for i := 0; i < len(a.GetTransitions()); i++ {
|
||||
target := a.GetTransitions()[i].getTarget()
|
||||
c := NewBaseATNConfig6(target, i+1, initialContext)
|
||||
closureBusy := newArray2DHashSet(nil, nil)
|
||||
closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
|
||||
p.closure(c, configs, closureBusy, true, fullCtx, false)
|
||||
}
|
||||
return configs
|
||||
}
|
||||
|
||||
//
|
||||
// This method transforms the start state computed by
|
||||
// {@link //computeStartState} to the special start state used by a
|
||||
// precedence DFA for a particular precedence value. The transformation
|
||||
@@ -726,7 +729,6 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
|
||||
// @return The transformed configuration set representing the start state
|
||||
// for a precedence DFA at a particular precedence level (determined by
|
||||
// calling {@link Parser//getPrecedence}).
|
||||
//
|
||||
func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
|
||||
|
||||
statesFromAlt1 := make(map[int]PredictionContext)
|
||||
@@ -760,7 +762,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf
|
||||
// (basically a graph subtraction algorithm).
|
||||
if !config.getPrecedenceFilterSuppressed() {
|
||||
context := statesFromAlt1[config.GetState().GetStateNumber()]
|
||||
if context != nil && context.equals(config.GetContext()) {
|
||||
if context != nil && context.Equals(config.GetContext()) {
|
||||
// eliminated
|
||||
continue
|
||||
}
|
||||
@@ -824,7 +826,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
|
||||
return pairs
|
||||
}
|
||||
|
||||
//
|
||||
// This method is used to improve the localization of error messages by
|
||||
// choosing an alternative rather than panicing a
|
||||
// {@link NoViableAltException} in particular prediction scenarios where the
|
||||
@@ -869,7 +870,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
|
||||
// @return The value to return from {@link //AdaptivePredict}, or
|
||||
// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
|
||||
// identified and {@link //AdaptivePredict} should Report an error instead.
|
||||
//
|
||||
func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
|
||||
cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
|
||||
semValidConfigs := cfgs[0]
|
||||
@@ -938,11 +938,11 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS
|
||||
}
|
||||
|
||||
// Look through a list of predicate/alt pairs, returning alts for the
|
||||
// pairs that win. A {@code NONE} predicate indicates an alt containing an
|
||||
// unpredicated config which behaves as "always true." If !complete
|
||||
// then we stop at the first predicate that evaluates to true. This
|
||||
// includes pairs with nil predicates.
|
||||
//
|
||||
// pairs that win. A {@code NONE} predicate indicates an alt containing an
|
||||
// unpredicated config which behaves as "always true." If !complete
|
||||
// then we stop at the first predicate that evaluates to true. This
|
||||
// includes pairs with nil predicates.
|
||||
func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
|
||||
predictions := NewBitSet()
|
||||
for i := 0; i < len(predPredictions); i++ {
|
||||
@@ -972,16 +972,16 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
|
||||
return predictions
|
||||
}
|
||||
|
||||
func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
|
||||
func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
|
||||
initialDepth := 0
|
||||
p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
|
||||
fullCtx, initialDepth, treatEOFAsEpsilon)
|
||||
}
|
||||
|
||||
func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
|
||||
if ParserATNSimulatorDebug {
|
||||
func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("closure(" + config.String() + ")")
|
||||
fmt.Println("configs(" + configs.String() + ")")
|
||||
//fmt.Println("configs(" + configs.String() + ")")
|
||||
if config.GetReachesIntoOuterContext() > 50 {
|
||||
panic("problem")
|
||||
}
|
||||
@@ -1031,7 +1031,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs
|
||||
}
|
||||
|
||||
// Do the actual work of walking epsilon edges//
|
||||
func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
|
||||
func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
|
||||
state := config.GetState()
|
||||
// optimization
|
||||
if !state.GetEpsilonOnlyTransitions() {
|
||||
@@ -1066,7 +1066,8 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
|
||||
|
||||
c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
|
||||
|
||||
if closureBusy.Add(c) != c {
|
||||
_, present := closureBusy.Put(c)
|
||||
if present {
|
||||
// avoid infinite recursion for right-recursive rules
|
||||
continue
|
||||
}
|
||||
@@ -1077,9 +1078,13 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
|
||||
fmt.Println("dips into outer ctx: " + c.String())
|
||||
}
|
||||
} else {
|
||||
if !t.getIsEpsilon() && closureBusy.Add(c) != c {
|
||||
// avoid infinite recursion for EOF* and EOF+
|
||||
continue
|
||||
|
||||
if !t.getIsEpsilon() {
|
||||
_, present := closureBusy.Put(c)
|
||||
if present {
|
||||
// avoid infinite recursion for EOF* and EOF+
|
||||
continue
|
||||
}
|
||||
}
|
||||
if _, ok := t.(*RuleTransition); ok {
|
||||
// latch when newDepth goes negative - once we step out of the entry context we can't return
|
||||
@@ -1104,7 +1109,16 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
|
||||
// left-recursion elimination. For efficiency, also check if
|
||||
// the context has an empty stack case. If so, it would mean
|
||||
// global FOLLOW so we can't perform optimization
|
||||
if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
|
||||
if _p.GetStateType() != ATNStateStarLoopEntry {
|
||||
return false
|
||||
}
|
||||
startLoop, ok := _p.(*StarLoopEntryState)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if !startLoop.precedenceRuleDecision ||
|
||||
config.GetContext().isEmpty() ||
|
||||
config.GetContext().hasEmptyPath() {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1117,8 +1131,8 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
|
||||
x := _p.GetTransitions()[0].getTarget()
|
||||
decisionStartState := x.(BlockStartState)
|
||||
blockEndStateNum := decisionStartState.getEndState().stateNumber
|
||||
blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
|
||||
|
||||
@@ -1355,13 +1369,12 @@ func (p *ParserATNSimulator) GetTokenName(t int) string {
|
||||
return "EOF"
|
||||
}
|
||||
|
||||
if p.parser != nil && p.parser.GetLiteralNames() != nil {
|
||||
if t >= len(p.parser.GetLiteralNames()) {
|
||||
fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
|
||||
// fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
|
||||
} else {
|
||||
return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
|
||||
}
|
||||
if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
|
||||
return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
|
||||
}
|
||||
|
||||
if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) {
|
||||
return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
|
||||
}
|
||||
|
||||
return strconv.Itoa(t)
|
||||
@@ -1372,9 +1385,9 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
|
||||
}
|
||||
|
||||
// Used for debugging in AdaptivePredict around execATN but I cut
|
||||
// it out for clarity now that alg. works well. We can leave p
|
||||
// "dead" code for a bit.
|
||||
//
|
||||
// it out for clarity now that alg. works well. We can leave p
|
||||
// "dead" code for a bit.
|
||||
func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
|
||||
|
||||
panic("Not implemented")
|
||||
@@ -1421,7 +1434,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
|
||||
return alt
|
||||
}
|
||||
|
||||
//
|
||||
// Add an edge to the DFA, if possible. This method calls
|
||||
// {@link //addDFAState} to ensure the {@code to} state is present in the
|
||||
// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
|
||||
@@ -1440,7 +1452,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
|
||||
// @return If {@code to} is {@code nil}, p method returns {@code nil}
|
||||
// otherwise p method returns the result of calling {@link //addDFAState}
|
||||
// on {@code to}
|
||||
//
|
||||
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
|
||||
if ParserATNSimulatorDebug {
|
||||
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
|
||||
@@ -1472,7 +1483,6 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
|
||||
return to
|
||||
}
|
||||
|
||||
//
|
||||
// Add state {@code D} to the DFA if it is not already present, and return
|
||||
// the actual instance stored in the DFA. If a state equivalent to {@code D}
|
||||
// is already in the DFA, the existing state is returned. Otherwise p
|
||||
@@ -1486,25 +1496,30 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
|
||||
// @return The state stored in the DFA. This will be either the existing
|
||||
// state if {@code D} is already in the DFA, or {@code D} itself if the
|
||||
// state was not already present.
|
||||
//
|
||||
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
|
||||
if d == ATNSimulatorError {
|
||||
return d
|
||||
}
|
||||
hash := d.hash()
|
||||
existing, ok := dfa.getState(hash)
|
||||
if ok {
|
||||
existing, present := dfa.states.Get(d)
|
||||
if present {
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Print("addDFAState " + d.String() + " exists")
|
||||
}
|
||||
return existing
|
||||
}
|
||||
d.stateNumber = dfa.numStates()
|
||||
|
||||
// The state was not present, so update it with configs
|
||||
//
|
||||
d.stateNumber = dfa.states.Len()
|
||||
if !d.configs.ReadOnly() {
|
||||
d.configs.OptimizeConfigs(p.BaseATNSimulator)
|
||||
d.configs.SetReadOnly(true)
|
||||
}
|
||||
dfa.setState(hash, d)
|
||||
if ParserATNSimulatorDebug {
|
||||
fmt.Println("adding NewDFA state: " + d.String())
|
||||
dfa.states.Put(d)
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("addDFAState new " + d.String())
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -340,7 +340,7 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
	return s
}

var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)

type InterpreterRuleContext interface {
	ParserRuleContext
@@ -1,10 +1,12 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"golang.org/x/exp/slices"
	"strconv"
)

@@ -26,10 +28,10 @@ var (
)

type PredictionContext interface {
	hash() int
	Hash() int
	Equals(interface{}) bool
	GetParent(int) PredictionContext
	getReturnState(int) int
	equals(PredictionContext) bool
	length() int
	isEmpty() bool
	hasEmptyPath() bool
@@ -53,7 +55,7 @@ func (b *BasePredictionContext) isEmpty() bool {
|
||||
|
||||
func calculateHash(parent PredictionContext, returnState int) int {
|
||||
h := murmurInit(1)
|
||||
h = murmurUpdate(h, parent.hash())
|
||||
h = murmurUpdate(h, parent.Hash())
|
||||
h = murmurUpdate(h, returnState)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
@@ -86,7 +88,6 @@ func NewPredictionContextCache() *PredictionContextCache {
|
||||
// Add a context to the cache and return it. If the context already exists,
|
||||
// return that one instead and do not add a Newcontext to the cache.
|
||||
// Protect shared cache from unsafe thread access.
|
||||
//
|
||||
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
|
||||
if ctx == BasePredictionContextEMPTY {
|
||||
return BasePredictionContextEMPTY
|
||||
@@ -160,28 +161,28 @@ func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
|
||||
return b.returnState == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
|
||||
func (b *BaseSingletonPredictionContext) Hash() int {
|
||||
return b.cachedHash
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
|
||||
if b == other {
|
||||
return true
|
||||
} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
|
||||
}
|
||||
if _, ok := other.(*BaseSingletonPredictionContext); !ok {
|
||||
return false
|
||||
} else if b.hash() != other.hash() {
|
||||
return false // can't be same if hash is different
|
||||
}
|
||||
|
||||
otherP := other.(*BaseSingletonPredictionContext)
|
||||
|
||||
if b.returnState != other.getReturnState(0) {
|
||||
if b.returnState != otherP.getReturnState(0) {
|
||||
return false
|
||||
} else if b.parentCtx == nil {
|
||||
}
|
||||
if b.parentCtx == nil {
|
||||
return otherP.parentCtx == nil
|
||||
}
|
||||
|
||||
return b.parentCtx.equals(otherP.parentCtx)
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) hash() int {
|
||||
return b.cachedHash
|
||||
return b.parentCtx.Equals(otherP.parentCtx)
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) String() string {
|
||||
@@ -215,7 +216,7 @@ func NewEmptyPredictionContext() *EmptyPredictionContext {
|
||||
p := new(EmptyPredictionContext)
|
||||
|
||||
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
|
||||
|
||||
p.cachedHash = calculateEmptyHash()
|
||||
return p
|
||||
}
|
||||
|
||||
@@ -231,7 +232,11 @@ func (e *EmptyPredictionContext) getReturnState(index int) int {
|
||||
return e.returnState
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
|
||||
func (e *EmptyPredictionContext) Hash() int {
|
||||
return e.cachedHash
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) Equals(other interface{}) bool {
|
||||
return e == other
|
||||
}
|
||||
|
||||
@@ -254,7 +259,7 @@ func NewArrayPredictionContext(parents []PredictionContext, returnStates []int)
|
||||
hash := murmurInit(1)
|
||||
|
||||
for _, parent := range parents {
|
||||
hash = murmurUpdate(hash, parent.hash())
|
||||
hash = murmurUpdate(hash, parent.Hash())
|
||||
}
|
||||
|
||||
for _, returnState := range returnStates {
|
||||
@@ -298,18 +303,31 @@ func (a *ArrayPredictionContext) getReturnState(index int) int {
|
||||
return a.returnStates[index]
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
|
||||
if _, ok := other.(*ArrayPredictionContext); !ok {
|
||||
return false
|
||||
} else if a.cachedHash != other.hash() {
|
||||
return false // can't be same if hash is different
|
||||
} else {
|
||||
otherP := other.(*ArrayPredictionContext)
|
||||
return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
|
||||
// Equals is the default comparison function for ArrayPredictionContext when no specialized
|
||||
// implementation is needed for a collection
|
||||
func (a *ArrayPredictionContext) Equals(o interface{}) bool {
|
||||
if a == o {
|
||||
return true
|
||||
}
|
||||
other, ok := o.(*ArrayPredictionContext)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if a.cachedHash != other.Hash() {
|
||||
return false // can't be same if hash is different
|
||||
}
|
||||
|
||||
// Must compare the actual array elements and not just the array address
|
||||
//
|
||||
return slices.Equal(a.returnStates, other.returnStates) &&
|
||||
slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
|
||||
return x.Equals(y)
|
||||
})
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) hash() int {
|
||||
// Hash is the default hash function for ArrayPredictionContext when no specialized
|
||||
// implementation is needed for a collection
|
||||
func (a *ArrayPredictionContext) Hash() int {
|
||||
return a.BasePredictionContext.cachedHash
|
||||
}
|
||||
|
||||
@@ -343,11 +361,11 @@ func (a *ArrayPredictionContext) String() string {
|
||||
// /
|
||||
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
|
||||
if outerContext == nil {
|
||||
outerContext = RuleContextEmpty
|
||||
outerContext = ParserRuleContextEmpty
|
||||
}
|
||||
// if we are in RuleContext of start rule, s, then BasePredictionContext
|
||||
// is EMPTY. Nobody called us. (if we are empty, return empty)
|
||||
if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
|
||||
if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
// If we have a parent, convert it to a BasePredictionContext graph
|
||||
@@ -359,11 +377,20 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti
|
||||
}
|
||||
|
||||
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
|
||||
// share same graph if both same
|
||||
if a == b {
|
||||
|
||||
// Share same graph if both same
|
||||
//
|
||||
if a == b || a.Equals(b) {
|
||||
return a
|
||||
}
|
||||
|
||||
// In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
|
||||
// in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
|
||||
// from it.
|
||||
// In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
|
||||
// will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
|
||||
// either of them.
|
||||
|
||||
ac, ok1 := a.(*BaseSingletonPredictionContext)
|
||||
bc, ok2 := b.(*BaseSingletonPredictionContext)
|
||||
|
||||
@@ -380,17 +407,32 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
|
||||
return b
|
||||
}
|
||||
}
|
||||
// convert singleton so both are arrays to normalize
|
||||
if _, ok := a.(*BaseSingletonPredictionContext); ok {
|
||||
a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
|
||||
|
||||
// Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
|
||||
// here.
|
||||
//
|
||||
// TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
|
||||
|
||||
var arp, arb *ArrayPredictionContext
|
||||
var ok bool
|
||||
if arp, ok = a.(*ArrayPredictionContext); ok {
|
||||
} else if _, ok = a.(*BaseSingletonPredictionContext); ok {
|
||||
arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
|
||||
} else if _, ok = a.(*EmptyPredictionContext); ok {
|
||||
arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
|
||||
}
|
||||
if _, ok := b.(*BaseSingletonPredictionContext); ok {
|
||||
b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
|
||||
|
||||
if arb, ok = b.(*ArrayPredictionContext); ok {
|
||||
} else if _, ok = b.(*BaseSingletonPredictionContext); ok {
|
||||
arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
|
||||
} else if _, ok = b.(*EmptyPredictionContext); ok {
|
||||
arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
|
||||
}
|
||||
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
|
||||
|
||||
// Both arp and arb
|
||||
return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
|
||||
}
|
||||
|
||||
//
|
||||
// Merge two {@link SingletonBasePredictionContext} instances.
|
||||
//
|
||||
// <p>Stack tops equal, parents merge is same return left graph.<br>
|
||||
@@ -423,11 +465,11 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
|
||||
// /
|
||||
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
|
||||
if mergeCache != nil {
|
||||
previous := mergeCache.Get(a.hash(), b.hash())
|
||||
previous := mergeCache.Get(a.Hash(), b.Hash())
|
||||
if previous != nil {
|
||||
return previous.(PredictionContext)
|
||||
}
|
||||
previous = mergeCache.Get(b.hash(), a.hash())
|
||||
previous = mergeCache.Get(b.Hash(), a.Hash())
|
||||
if previous != nil {
|
||||
return previous.(PredictionContext)
|
||||
}
|
||||
@@ -436,7 +478,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
|
||||
rootMerge := mergeRoot(a, b, rootIsWildcard)
|
||||
if rootMerge != nil {
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), rootMerge)
|
||||
mergeCache.set(a.Hash(), b.Hash(), rootMerge)
|
||||
}
|
||||
return rootMerge
|
||||
}
|
||||
@@ -456,7 +498,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
|
||||
// Newjoined parent so create Newsingleton pointing to it, a'
|
||||
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), spc)
|
||||
mergeCache.set(a.Hash(), b.Hash(), spc)
|
||||
}
|
||||
return spc
|
||||
}
|
||||
@@ -478,7 +520,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
|
||||
parents := []PredictionContext{singleParent, singleParent}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), apc)
|
||||
mergeCache.set(a.Hash(), b.Hash(), apc)
|
||||
}
|
||||
return apc
|
||||
}
|
||||
@@ -494,12 +536,11 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
|
||||
}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), apc)
|
||||
mergeCache.set(a.Hash(), b.Hash(), apc)
|
||||
}
|
||||
return apc
|
||||
}
|
||||
|
||||
//
|
||||
// Handle case where at least one of {@code a} or {@code b} is
|
||||
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
|
||||
// to represent {@link //EMPTY}.
|
||||
@@ -561,7 +602,6 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
// Merge two {@link ArrayBasePredictionContext} instances.
|
||||
//
|
||||
// <p>Different tops, different parents.<br>
|
||||
@@ -583,12 +623,18 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
|
||||
// /
|
||||
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
|
||||
if mergeCache != nil {
|
||||
previous := mergeCache.Get(a.hash(), b.hash())
|
||||
previous := mergeCache.Get(a.Hash(), b.Hash())
|
||||
if previous != nil {
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
|
||||
}
|
||||
return previous.(PredictionContext)
|
||||
}
|
||||
previous = mergeCache.Get(b.hash(), a.hash())
|
||||
previous = mergeCache.Get(b.Hash(), a.Hash())
|
||||
if previous != nil {
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
|
||||
}
|
||||
return previous.(PredictionContext)
|
||||
}
|
||||
}
|
||||
@@ -608,7 +654,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
|
||||
payload := a.returnStates[i]
|
||||
// $+$ = $
|
||||
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
|
||||
axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
|
||||
axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
|
||||
// ->
|
||||
// ax
|
||||
if bothDollars || axAX {
|
||||
@@ -651,7 +697,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
|
||||
if k == 1 { // for just one merged element, return singleton top
|
||||
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), pc)
|
||||
mergeCache.set(a.Hash(), b.Hash(), pc)
|
||||
}
|
||||
return pc
|
||||
}
|
||||
@@ -663,27 +709,36 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
|
||||
|
||||
// if we created same array as a or b, return that instead
|
||||
// TODO: track whether this is possible above during merge sort for speed
|
||||
// TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
|
||||
if M == a {
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), a)
|
||||
mergeCache.set(a.Hash(), b.Hash(), a)
|
||||
}
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
|
||||
}
|
||||
return a
|
||||
}
|
||||
if M == b {
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), b)
|
||||
mergeCache.set(a.Hash(), b.Hash(), b)
|
||||
}
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
|
||||
}
|
||||
return b
|
||||
}
|
||||
combineCommonParents(mergedParents)
|
||||
|
||||
if mergeCache != nil {
|
||||
mergeCache.set(a.hash(), b.hash(), M)
|
||||
mergeCache.set(a.Hash(), b.Hash(), M)
|
||||
}
|
||||
if ParserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
|
||||
}
|
||||
return M
|
||||
}
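
The TODO above points out that M == a is a pointer comparison and M is a freshly allocated ArrayPredictionContext, so the "return a or b instead" shortcut can only ever fire on pointer identity, never on structural equality. A tiny standalone sketch (stand-in type, not the runtime's) of that distinction:

    package main

    import (
        "fmt"
        "reflect"
    )

    // ctx stands in for an array prediction context.
    type ctx struct {
        returnStates []int
    }

    func main() {
        a := &ctx{returnStates: []int{1, 2}}
        m := &ctx{returnStates: []int{1, 2}} // freshly allocated "merge result"

        fmt.Println(m == a)                  // false: compares pointer identity only
        fmt.Println(reflect.DeepEqual(m, a)) // true: the values are structurally equal
    }
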
//
|
||||
// Make pass over all <em>M</em> {@code parents} merge any {@code equals()}
|
||||
// ones.
|
||||
// /
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
@@ -70,7 +70,6 @@ const (
|
||||
PredictionModeLLExactAmbigDetection = 2
|
||||
)
|
||||
|
||||
//
|
||||
// Computes the SLL prediction termination condition.
|
||||
//
|
||||
// <p>
|
||||
@@ -108,9 +107,9 @@ const (
|
||||
// The single-alt-state thing lets prediction continue upon rules like
|
||||
// (otherwise, it would admit defeat too soon):</p>
|
||||
//
|
||||
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }</p>
|
||||
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }</p>
|
||||
//
|
||||
// <p>When the ATN simulation reaches the state before {@code ''}, it has a
|
||||
// <p>When the ATN simulation reaches the state before {@code ”}, it has a
|
||||
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
|
||||
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
|
||||
// processing this node because alternative two has another way to continue,
@@ -152,16 +151,15 @@ const (
|
||||
//
|
||||
// <p>Before testing these configurations against others, we have to merge
|
||||
// {@code x} and {@code x'} (without modifying the existing configurations).
|
||||
// For example, we test {@code (x+x')==x''} when looking for conflicts in
|
||||
// For example, we test {@code (x+x')==x”} when looking for conflicts in
|
||||
// the following configurations.</p>
|
||||
//
|
||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
|
||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}</p>
|
||||
//
|
||||
// <p>If the configuration set has predicates (as indicated by
|
||||
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
|
||||
// the configurations to strip out all of the predicates so that a standard
|
||||
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
|
||||
//
|
||||
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
|
||||
// Configs in rule stop states indicate reaching the end of the decision
|
||||
// rule (local context) or end of start rule (full context). If all
|
||||
@@ -229,7 +227,6 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
//
|
||||
// Full LL prediction termination.
|
||||
//
|
||||
// <p>Can we stop looking ahead during ATN simulation or is there some
|
||||
@@ -334,7 +331,7 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
|
||||
// </li>
|
||||
//
|
||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
|
||||
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
|
||||
// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
|
||||
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
|
||||
// {@code {1}} => stop and predict 1</li>
|
||||
//
|
||||
@@ -369,31 +366,26 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
|
||||
// two or one and three so we keep going. We can only stop prediction when
|
||||
// we need exact ambiguity detection when the sets look like
|
||||
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
|
||||
//
|
||||
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
|
||||
return PredictionModegetSingleViableAlt(altsets)
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if every alternative subset in {@code altsets} contains more
|
||||
// than one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if every {@link BitSet} in {@code altsets} has
|
||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
|
||||
return !PredictionModehasNonConflictingAltSet(altsets)
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// exactly one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
@@ -404,14 +396,12 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
|
||||
return false
|
||||
}
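
Both predicates here reduce to cardinality checks over the alternative subsets: this one reports whether any subset contains exactly one alternative, while PredictionModehasConflictingAltSet below reports whether any subset contains more than one. A short standalone sketch with plain map-based sets standing in for the runtime's BitSet:

    package main

    import "fmt"

    // altSet stands in for a BitSet of alternative numbers.
    type altSet map[int]bool

    func hasNonConflicting(altsets []altSet) bool {
        for _, s := range altsets {
            if len(s) == 1 { // exactly one viable alt: this subset does not conflict
                return true
            }
        }
        return false
    }

    func hasConflicting(altsets []altSet) bool {
        for _, s := range altsets {
            if len(s) > 1 { // more than one viable alt: this subset conflicts
                return true
            }
        }
        return false
    }

    func main() {
        altsets := []altSet{{1: true, 2: true}, {1: true}}
        fmt.Println(hasNonConflicting(altsets)) // true: {1} has cardinality 1
        fmt.Println(hasConflicting(altsets))    // true: {1, 2} has cardinality > 1
    }
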
//
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// more than one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
|
||||
//
|
||||
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
@@ -422,13 +412,11 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
//
|
||||
// Determines if every alternative subset in {@code altsets} is equivalent.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if every member of {@code altsets} is equal to the
|
||||
// others, otherwise {@code false}
|
||||
//
|
||||
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
|
||||
var first *BitSet
|
||||
|
||||
@@ -444,13 +432,11 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
//
|
||||
// Returns the unique alternative predicted by all alternative subsets in
|
||||
// {@code altsets}. If no such alternative exists, this method returns
|
||||
// {@link ATN//INVALID_ALT_NUMBER}.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
//
|
||||
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
|
||||
all := PredictionModeGetAlts(altsets)
|
||||
if all.length() == 1 {
|
||||
@@ -466,7 +452,6 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int {
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return the set of represented alternatives in {@code altsets}
|
||||
//
|
||||
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
|
||||
all := NewBitSet()
|
||||
for _, alts := range altsets {
|
||||
@@ -475,44 +460,35 @@ func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
|
||||
return all
|
||||
}
|
||||
|
||||
//
|
||||
// This func gets the conflicting alt subsets from a configuration set.
|
||||
// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
|
||||
// For each configuration {@code c} in {@code configs}:
|
||||
//
|
||||
// <pre>
|
||||
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
|
||||
// alt and not pred
|
||||
// </pre>
|
||||
//
|
||||
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
|
||||
configToAlts := make(map[int]*BitSet)
|
||||
configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
|
||||
|
||||
for _, c := range configs.GetItems() {
|
||||
key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
|
||||
|
||||
alts, ok := configToAlts[key]
|
||||
alts, ok := configToAlts.Get(c)
|
||||
if !ok {
|
||||
alts = NewBitSet()
|
||||
configToAlts[key] = alts
|
||||
configToAlts.Put(c, alts)
|
||||
}
|
||||
alts.add(c.GetAlt())
|
||||
}
|
||||
|
||||
values := make([]*BitSet, 0, 10)
|
||||
for _, v := range configToAlts {
|
||||
values = append(values, v)
|
||||
}
|
||||
return values
|
||||
return configToAlts.Values()
|
||||
}
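
The change above swaps a hand-rolled integer key (31*state + context hash, which can collide) for a JMap keyed by an equality comparator, so configurations are grouped by actual (state, context) equality rather than by a hash value. A standalone sketch of the grouping idea with stand-in types:

    package main

    import "fmt"

    // config stands in for ATNConfig; only state, context and alt matter here.
    type config struct {
        state, context, alt int
    }

    func main() {
        configs := []config{{1, 7, 1}, {1, 7, 2}, {2, 9, 1}}

        // Collect alternative numbers per (state, context); configurations that
        // differ only in their alternative land in the same subset.
        groups := map[[2]int]map[int]bool{}
        for _, c := range configs {
            key := [2]int{c.state, c.context}
            if groups[key] == nil {
                groups[key] = map[int]bool{}
            }
            groups[key][c.alt] = true
        }
        fmt.Println(groups) // map[[1 7]:map[1:true 2:true] [2 9]:map[1:true]]
    }
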
//
|
||||
// Get a map from state to alt subset from a configuration set. For each
|
||||
// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
|
||||
// configuration {@code c} in {@code configs}:
|
||||
//
|
||||
// <pre>
|
||||
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
|
||||
// </pre>
|
||||
//
|
||||
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
|
||||
m := NewAltDict()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int)
|
||||
var ruleIndexMapCache = make(map[string]int)
|
||||
|
||||
func (b *BaseRecognizer) checkVersion(toolVersion string) {
|
||||
runtimeVersion := "4.10.1"
|
||||
runtimeVersion := "4.12.0"
|
||||
if runtimeVersion != toolVersion {
|
||||
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
|
||||
}
|
||||
@@ -108,7 +108,6 @@ func (b *BaseRecognizer) SetState(v int) {
|
||||
// Get a map from rule names to rule indexes.
|
||||
//
|
||||
// <p>Used for XPath and tree pattern compilation.</p>
|
||||
//
|
||||
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
|
||||
|
||||
panic("Method not defined!")
|
||||
@@ -171,18 +170,18 @@ func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
|
||||
}
|
||||
|
||||
// How should a token be displayed in an error message? The default
|
||||
// is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override in that case
|
||||
// to use t.String() (which, for CommonToken, dumps everything about
|
||||
// the token). This is better than forcing you to override a method in
|
||||
// your token objects because you don't have to go modify your lexer
|
||||
// so that it creates a new Java type.
//
|
||||
// is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override in that case
|
||||
// to use t.String() (which, for CommonToken, dumps everything about
|
||||
// the token). This is better than forcing you to override a method in
|
||||
// your token objects because you don't have to go modify your lexer
|
||||
// so that it creates a new Java type.
//
|
||||
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
|
||||
// implementations of {@link ANTLRErrorStrategy} may provide a similar
|
||||
// feature when necessary. For example, see
|
||||
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
|
||||
//
|
||||
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
|
||||
if t == nil {
|
||||
return "<no token>"
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -18,12 +18,12 @@ import (
|
||||
//
|
||||
|
||||
type SemanticContext interface {
|
||||
comparable
|
||||
Equals(other Collectable[SemanticContext]) bool
|
||||
Hash() int
|
||||
|
||||
evaluate(parser Recognizer, outerContext RuleContext) bool
|
||||
evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
|
||||
|
||||
hash() int
|
||||
String() string
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
|
||||
//The default {@link SemanticContext}, which is semantically equivalent to
|
||||
//a predicate of the form {@code {true}?}.
|
||||
|
||||
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
|
||||
var SemanticContextNone = NewPredicate(-1, -1, false)
|
||||
|
||||
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
|
||||
return p
|
||||
@@ -95,7 +95,7 @@ func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
|
||||
return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
|
||||
}
|
||||
|
||||
func (p *Predicate) equals(other interface{}) bool {
|
||||
func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
|
||||
if p == other {
|
||||
return true
|
||||
} else if _, ok := other.(*Predicate); !ok {
|
||||
@@ -107,7 +107,7 @@ func (p *Predicate) equals(other interface{}) bool {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Predicate) hash() int {
|
||||
func (p *Predicate) Hash() int {
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, p.ruleIndex)
|
||||
h = murmurUpdate(h, p.predIndex)
|
||||
@@ -151,17 +151,22 @@ func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
|
||||
return p.precedence - other.precedence
|
||||
}
|
||||
|
||||
func (p *PrecedencePredicate) equals(other interface{}) bool {
|
||||
if p == other {
|
||||
return true
|
||||
} else if _, ok := other.(*PrecedencePredicate); !ok {
|
||||
func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
|
||||
|
||||
var op *PrecedencePredicate
|
||||
var ok bool
|
||||
if op, ok = other.(*PrecedencePredicate); !ok {
|
||||
return false
|
||||
} else {
|
||||
return p.precedence == other.(*PrecedencePredicate).precedence
|
||||
}
|
||||
|
||||
if p == op {
|
||||
return true
|
||||
}
|
||||
|
||||
return p.precedence == other.(*PrecedencePredicate).precedence
|
||||
}
|
||||
|
||||
func (p *PrecedencePredicate) hash() int {
|
||||
func (p *PrecedencePredicate) Hash() int {
|
||||
h := uint32(1)
|
||||
h = 31*h + uint32(p.precedence)
|
||||
return int(h)
|
||||
@@ -171,10 +176,10 @@ func (p *PrecedencePredicate) String() string {
|
||||
return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
|
||||
}
|
||||
|
||||
func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
|
||||
func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
|
||||
result := make([]*PrecedencePredicate, 0)
|
||||
|
||||
set.Each(func(v interface{}) bool {
|
||||
set.Each(func(v SemanticContext) bool {
|
||||
if c2, ok := v.(*PrecedencePredicate); ok {
|
||||
result = append(result, c2)
|
||||
}
|
||||
@@ -193,21 +198,21 @@ type AND struct {
|
||||
|
||||
func NewAND(a, b SemanticContext) *AND {
|
||||
|
||||
operands := newArray2DHashSet(nil, nil)
|
||||
operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
|
||||
if aa, ok := a.(*AND); ok {
|
||||
for _, o := range aa.opnds {
|
||||
operands.Add(o)
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Add(a)
|
||||
operands.Put(a)
|
||||
}
|
||||
|
||||
if ba, ok := b.(*AND); ok {
|
||||
for _, o := range ba.opnds {
|
||||
operands.Add(o)
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Add(b)
|
||||
operands.Put(b)
|
||||
}
|
||||
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
|
||||
if len(precedencePredicates) > 0 {
|
||||
@@ -220,7 +225,7 @@ func NewAND(a, b SemanticContext) *AND {
|
||||
}
|
||||
}
|
||||
|
||||
operands.Add(reduced)
|
||||
operands.Put(reduced)
|
||||
}
|
||||
|
||||
vs := operands.Values()
|
||||
@@ -235,14 +240,15 @@ func NewAND(a, b SemanticContext) *AND {
|
||||
return and
|
||||
}
|
||||
|
||||
func (a *AND) equals(other interface{}) bool {
|
||||
func (a *AND) Equals(other Collectable[SemanticContext]) bool {
|
||||
if a == other {
|
||||
return true
|
||||
} else if _, ok := other.(*AND); !ok {
|
||||
}
|
||||
if _, ok := other.(*AND); !ok {
|
||||
return false
|
||||
} else {
|
||||
for i, v := range other.(*AND).opnds {
|
||||
if !a.opnds[i].equals(v) {
|
||||
if !a.opnds[i].Equals(v) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -250,13 +256,11 @@ func (a *AND) equals(other interface{}) bool {
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// {@inheritDoc}
|
||||
//
|
||||
// <p>
|
||||
// The evaluation of predicates by a context is short-circuiting, but
|
||||
// unordered.</p>
|
||||
//
|
||||
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
|
||||
for i := 0; i < len(a.opnds); i++ {
|
||||
if !a.opnds[i].evaluate(parser, outerContext) {
|
||||
@@ -304,18 +308,18 @@ func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) Semant
|
||||
return result
|
||||
}
|
||||
|
||||
func (a *AND) hash() int {
|
||||
func (a *AND) Hash() int {
|
||||
h := murmurInit(37) // Init with a value different from OR
|
||||
for _, op := range a.opnds {
|
||||
h = murmurUpdate(h, op.hash())
|
||||
h = murmurUpdate(h, op.Hash())
|
||||
}
|
||||
return murmurFinish(h, len(a.opnds))
|
||||
}
|
||||
|
||||
func (a *OR) hash() int {
|
||||
func (a *OR) Hash() int {
|
||||
h := murmurInit(41) // Init with a value different from AND
|
||||
for _, op := range a.opnds {
|
||||
h = murmurUpdate(h, op.hash())
|
||||
h = murmurUpdate(h, op.Hash())
|
||||
}
|
||||
return murmurFinish(h, len(a.opnds))
|
||||
}
|
||||
@@ -345,21 +349,21 @@ type OR struct {
|
||||
|
||||
func NewOR(a, b SemanticContext) *OR {
|
||||
|
||||
operands := newArray2DHashSet(nil, nil)
|
||||
operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
|
||||
if aa, ok := a.(*OR); ok {
|
||||
for _, o := range aa.opnds {
|
||||
operands.Add(o)
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Add(a)
|
||||
operands.Put(a)
|
||||
}
|
||||
|
||||
if ba, ok := b.(*OR); ok {
|
||||
for _, o := range ba.opnds {
|
||||
operands.Add(o)
|
||||
operands.Put(o)
|
||||
}
|
||||
} else {
|
||||
operands.Add(b)
|
||||
operands.Put(b)
|
||||
}
|
||||
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
|
||||
if len(precedencePredicates) > 0 {
|
||||
@@ -372,7 +376,7 @@ func NewOR(a, b SemanticContext) *OR {
|
||||
}
|
||||
}
|
||||
|
||||
operands.Add(reduced)
|
||||
operands.Put(reduced)
|
||||
}
|
||||
|
||||
vs := operands.Values()
|
||||
@@ -388,14 +392,14 @@ func NewOR(a, b SemanticContext) *OR {
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *OR) equals(other interface{}) bool {
|
||||
func (o *OR) Equals(other Collectable[SemanticContext]) bool {
|
||||
if o == other {
|
||||
return true
|
||||
} else if _, ok := other.(*OR); !ok {
|
||||
return false
|
||||
} else {
|
||||
for i, v := range other.(*OR).opnds {
|
||||
if !o.opnds[i].equals(v) {
|
||||
if !o.opnds[i].Equals(v) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -406,7 +410,6 @@ func (o *OR) equals(other interface{}) bool {
|
||||
// <p>
|
||||
// The evaluation of predicates by o context is short-circuiting, but
|
||||
// unordered.</p>
|
||||
//
|
||||
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
|
||||
for i := 0; i < len(o.opnds); i++ {
|
||||
if o.opnds[i].evaluate(parser, outerContext) {
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -158,7 +158,6 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
|
||||
// {@link Token//GetInputStream}.</p>
|
||||
//
|
||||
// @param oldToken The token to copy.
|
||||
//
|
||||
func (c *CommonToken) clone() *CommonToken {
|
||||
t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
|
||||
t.tokenIndex = c.GetTokenIndex()
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
|
||||
//
|
||||
//
|
||||
// Useful for rewriting out a buffered input token stream after doing some
|
||||
// augmentation or other manipulations on it.
|
||||
|
||||
@@ -85,12 +85,10 @@ import (
|
||||
// If you don't use named rewrite streams, a "default" stream is used as the
|
||||
// first example shows.</p>
|
||||
|
||||
|
||||
|
||||
const(
|
||||
const (
|
||||
Default_Program_Name = "default"
|
||||
Program_Init_Size = 100
|
||||
Min_Token_Index = 0
|
||||
Program_Init_Size = 100
|
||||
Min_Token_Index = 0
|
||||
)
|
||||
|
||||
// Define the rewrite operation hierarchy
|
||||
@@ -98,13 +96,13 @@ const(
|
||||
type RewriteOperation interface {
|
||||
// Execute the rewrite operation by possibly adding to the buffer.
|
||||
// Return the index of the next token to operate on.
|
||||
Execute(buffer *bytes.Buffer) int
|
||||
String() string
|
||||
GetInstructionIndex() int
|
||||
GetIndex() int
|
||||
GetText() string
|
||||
GetOpName() string
|
||||
GetTokens() TokenStream
|
||||
Execute(buffer *bytes.Buffer) int
|
||||
String() string
|
||||
GetInstructionIndex() int
|
||||
GetIndex() int
|
||||
GetText() string
|
||||
GetOpName() string
|
||||
GetTokens() TokenStream
|
||||
SetInstructionIndex(val int)
|
||||
SetIndex(int)
|
||||
SetText(string)
|
||||
@@ -114,63 +112,62 @@ type RewriteOperation interface {
|
||||
|
||||
type BaseRewriteOperation struct {
|
||||
//Current index of rewrites list
|
||||
instruction_index int
|
||||
instruction_index int
|
||||
//Token buffer index
|
||||
index int
|
||||
index int
|
||||
//Substitution text
|
||||
text string
|
||||
text string
|
||||
//Actual operation name
|
||||
op_name string
|
||||
op_name string
|
||||
//Pointer to token stream
tokens TokenStream
|
||||
tokens TokenStream
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)GetInstructionIndex() int{
|
||||
func (op *BaseRewriteOperation) GetInstructionIndex() int {
|
||||
return op.instruction_index
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)GetIndex() int{
|
||||
func (op *BaseRewriteOperation) GetIndex() int {
|
||||
return op.index
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)GetText() string{
|
||||
func (op *BaseRewriteOperation) GetText() string {
|
||||
return op.text
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)GetOpName() string{
|
||||
func (op *BaseRewriteOperation) GetOpName() string {
|
||||
return op.op_name
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)GetTokens() TokenStream{
|
||||
func (op *BaseRewriteOperation) GetTokens() TokenStream {
|
||||
return op.tokens
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)SetInstructionIndex(val int){
|
||||
func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
|
||||
op.instruction_index = val
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)SetIndex(val int) {
|
||||
func (op *BaseRewriteOperation) SetIndex(val int) {
|
||||
op.index = val
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)SetText(val string){
|
||||
func (op *BaseRewriteOperation) SetText(val string) {
|
||||
op.text = val
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)SetOpName(val string){
|
||||
func (op *BaseRewriteOperation) SetOpName(val string) {
|
||||
op.op_name = val
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
|
||||
func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
|
||||
op.tokens = val
|
||||
}
|
||||
|
||||
|
||||
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
|
||||
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
|
||||
return op.index
|
||||
}
|
||||
|
||||
func (op *BaseRewriteOperation) String() string {
|
||||
func (op *BaseRewriteOperation) String() string {
|
||||
return fmt.Sprintf("<%s@%d:\"%s\">",
|
||||
op.op_name,
|
||||
op.tokens.Get(op.GetIndex()),
|
||||
@@ -179,26 +176,25 @@ func (op *BaseRewriteOperation) String() string {
|
||||
|
||||
}
|
||||
|
||||
|
||||
type InsertBeforeOp struct {
|
||||
BaseRewriteOperation
|
||||
}
|
||||
|
||||
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
|
||||
return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
|
||||
index:index,
|
||||
text:text,
|
||||
op_name:"InsertBeforeOp",
|
||||
tokens:stream,
|
||||
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
|
||||
return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
|
||||
index: index,
|
||||
text: text,
|
||||
op_name: "InsertBeforeOp",
|
||||
tokens: stream,
|
||||
}}
|
||||
}
|
||||
|
||||
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
|
||||
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
|
||||
buffer.WriteString(op.text)
|
||||
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
|
||||
if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
|
||||
buffer.WriteString(op.tokens.Get(op.index).GetText())
|
||||
}
|
||||
return op.index+1
|
||||
return op.index + 1
|
||||
}
|
||||
|
||||
func (op *InsertBeforeOp) String() string {
|
||||
@@ -213,20 +209,20 @@ type InsertAfterOp struct {
|
||||
BaseRewriteOperation
|
||||
}
|
||||
|
||||
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
|
||||
return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
|
||||
index:index+1,
|
||||
text:text,
|
||||
tokens:stream,
|
||||
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
|
||||
return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
|
||||
index: index + 1,
|
||||
text: text,
|
||||
tokens: stream,
|
||||
}}
|
||||
}
|
||||
|
||||
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
|
||||
buffer.WriteString(op.text)
|
||||
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
|
||||
if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
|
||||
buffer.WriteString(op.tokens.Get(op.index).GetText())
|
||||
}
|
||||
return op.index+1
|
||||
return op.index + 1
|
||||
}
|
||||
|
||||
func (op *InsertAfterOp) String() string {
|
||||
@@ -235,28 +231,28 @@ func (op *InsertAfterOp) String() string {
|
||||
|
||||
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
|
||||
// instructions.
|
||||
type ReplaceOp struct{
|
||||
type ReplaceOp struct {
|
||||
BaseRewriteOperation
|
||||
LastIndex int
|
||||
}
|
||||
|
||||
func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
|
||||
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
|
||||
return &ReplaceOp{
|
||||
BaseRewriteOperation:BaseRewriteOperation{
|
||||
index:from,
|
||||
text:text,
|
||||
op_name:"ReplaceOp",
|
||||
tokens:stream,
|
||||
BaseRewriteOperation: BaseRewriteOperation{
|
||||
index: from,
|
||||
text: text,
|
||||
op_name: "ReplaceOp",
|
||||
tokens: stream,
|
||||
},
|
||||
LastIndex:to,
|
||||
LastIndex: to,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
|
||||
if op.text != ""{
|
||||
func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
|
||||
if op.text != "" {
|
||||
buffer.WriteString(op.text)
|
||||
}
|
||||
return op.LastIndex +1
|
||||
return op.LastIndex + 1
|
||||
}
|
||||
|
||||
func (op *ReplaceOp) String() string {
|
||||
@@ -268,54 +264,54 @@ func (op *ReplaceOp) String() string {
|
||||
op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
|
||||
}
|
||||
|
||||
|
||||
type TokenStreamRewriter struct {
|
||||
//Our source stream
|
||||
tokens TokenStream
|
||||
tokens TokenStream
|
||||
// You may have multiple, named streams of rewrite operations.
|
||||
// I'm calling these things "programs."
|
||||
// Maps String (name) → rewrite (List)
|
||||
programs map[string][]RewriteOperation
|
||||
last_rewrite_token_indexes map[string]int
|
||||
programs map[string][]RewriteOperation
|
||||
last_rewrite_token_indexes map[string]int
|
||||
}
|
||||
|
||||
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
|
||||
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
|
||||
return &TokenStreamRewriter{
|
||||
tokens: tokens,
|
||||
programs: map[string][]RewriteOperation{
|
||||
Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
|
||||
tokens: tokens,
|
||||
programs: map[string][]RewriteOperation{
|
||||
Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
|
||||
},
|
||||
last_rewrite_token_indexes: map[string]int{},
|
||||
last_rewrite_token_indexes: map[string]int{},
|
||||
}
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
|
||||
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
|
||||
return tsr.tokens
|
||||
}
|
||||
|
||||
// Rollback the instruction stream for a program so that
|
||||
// the indicated instruction (via instructionIndex) is no
|
||||
// longer in the stream. UNTESTED!
|
||||
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
|
||||
is, ok := tsr.programs[program_name]
|
||||
if ok{
|
||||
// Rollback the instruction stream for a program so that
|
||||
// the indicated instruction (via instructionIndex) is no
|
||||
// longer in the stream. UNTESTED!
|
||||
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
|
||||
is, ok := tsr.programs[program_name]
|
||||
if ok {
|
||||
tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
|
||||
}
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
|
||||
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
|
||||
tsr.Rollback(Default_Program_Name, instruction_index)
|
||||
}
|
||||
//Reset the program so that no instructions exist
|
||||
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
|
||||
|
||||
// Reset the program so that no instructions exist
|
||||
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
|
||||
tsr.Rollback(program_name, Min_Token_Index) //TODO: double-check this, because the lower bound is not included
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
|
||||
func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
|
||||
tsr.DeleteProgram(Default_Program_Name)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
|
||||
// to insert after, just insert before next index (even if past end)
|
||||
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
|
||||
rewrites := tsr.GetProgram(program_name)
|
||||
@@ -323,31 +319,31 @@ func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text
|
||||
tsr.AddToProgram(program_name, op)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
|
||||
tsr.InsertAfter(Default_Program_Name, index, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
|
||||
tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
|
||||
}
|
||||
|
||||
func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
|
||||
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
|
||||
rewrites := tsr.GetProgram(program_name)
|
||||
op.SetInstructionIndex(len(rewrites))
|
||||
tsr.AddToProgram(program_name, op)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
|
||||
tsr.InsertBefore(Default_Program_Name, index, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
|
||||
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
|
||||
tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
|
||||
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
|
||||
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
|
||||
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
|
||||
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
|
||||
from, to, tsr.tokens.Size()))
|
||||
}
|
||||
@@ -357,207 +353,216 @@ func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text
|
||||
tsr.AddToProgram(program_name, op)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
|
||||
func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
|
||||
tsr.Replace(Default_Program_Name, from, to, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
|
||||
func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
|
||||
tsr.ReplaceDefault(index, index, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
|
||||
func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
|
||||
tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
|
||||
func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
|
||||
tsr.ReplaceToken(Default_Program_Name, from, to, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
|
||||
func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
|
||||
tsr.ReplaceTokenDefault(index, index, text)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
|
||||
tsr.Replace(program_name, from, to, "" )
|
||||
func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
|
||||
tsr.Replace(program_name, from, to, "")
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
|
||||
func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
|
||||
tsr.Delete(Default_Program_Name, from, to)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
|
||||
tsr.DeleteDefault(index,index)
|
||||
func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
|
||||
tsr.DeleteDefault(index, index)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
|
||||
func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
|
||||
tsr.ReplaceToken(program_name, from, to, "")
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
|
||||
func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
|
||||
tsr.DeleteToken(Default_Program_Name, from, to)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
|
||||
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
|
||||
i, ok := tsr.last_rewrite_token_indexes[program_name]
|
||||
if !ok{
|
||||
if !ok {
|
||||
return -1
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
|
||||
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
|
||||
return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
|
||||
func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
|
||||
tsr.last_rewrite_token_indexes[program_name] = i
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
|
||||
func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
|
||||
is := make([]RewriteOperation, 0, Program_Init_Size)
|
||||
tsr.programs[name] = is
|
||||
return is
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
|
||||
func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
|
||||
is := tsr.GetProgram(name)
|
||||
is = append(is, op)
|
||||
tsr.programs[name] = is
|
||||
}
|
||||
|
||||
func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
|
||||
func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
|
||||
is, ok := tsr.programs[name]
|
||||
if !ok{
|
||||
if !ok {
|
||||
is = tsr.InitializeProgram(name)
|
||||
}
|
||||
return is
|
||||
}
|
||||
// Return the text from the original tokens altered per the
|
||||
// instructions given to this rewriter.
|
||||
func (tsr *TokenStreamRewriter)GetTextDefault() string{
|
||||
|
||||
// Return the text from the original tokens altered per the
|
||||
// instructions given to this rewriter.
|
||||
func (tsr *TokenStreamRewriter) GetTextDefault() string {
|
||||
return tsr.GetText(
|
||||
Default_Program_Name,
|
||||
NewInterval(0, tsr.tokens.Size()-1))
|
||||
}
|
||||
// Return the text from the original tokens altered per the
|
||||
// instructions given to this rewriter.
|
||||
func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
|
||||
|
||||
// Return the text from the original tokens altered per the
|
||||
// instructions given to this rewriter.
|
||||
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
|
||||
rewrites := tsr.programs[program_name]
|
||||
start := interval.Start
|
||||
stop := interval.Stop
|
||||
stop := interval.Stop
|
||||
// ensure start/end are in range
|
||||
stop = min(stop, tsr.tokens.Size()-1)
|
||||
start = max(start,0)
|
||||
if rewrites == nil || len(rewrites) == 0{
|
||||
start = max(start, 0)
|
||||
if rewrites == nil || len(rewrites) == 0 {
|
||||
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
|
||||
}
|
||||
buf := bytes.Buffer{}
|
||||
// First, optimize instruction stream
|
||||
indexToOp := reduceToSingleOperationPerIndex(rewrites)
|
||||
// Walk buffer, executing instructions and emitting tokens
|
||||
for i:=start; i<=stop && i<tsr.tokens.Size();{
|
||||
for i := start; i <= stop && i < tsr.tokens.Size(); {
|
||||
op := indexToOp[i]
|
||||
delete(indexToOp, i)// remove so any left have index size-1
|
||||
delete(indexToOp, i) // remove so any left have index size-1
|
||||
t := tsr.tokens.Get(i)
|
||||
if op == nil{
|
||||
if op == nil {
|
||||
// no operation at that index, just dump token
|
||||
if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
|
||||
if t.GetTokenType() != TokenEOF {
|
||||
buf.WriteString(t.GetText())
|
||||
}
|
||||
i++ // move to next token
|
||||
}else {
|
||||
i = op.Execute(&buf)// execute operation and skip
|
||||
} else {
|
||||
i = op.Execute(&buf) // execute operation and skip
|
||||
}
|
||||
}
|
||||
// include stuff after end if it's last index in buffer
|
||||
// So, if they did an insertAfter(lastValidIndex, "foo"), include
|
||||
// foo if end==lastValidIndex.
|
||||
if stop == tsr.tokens.Size()-1{
|
||||
if stop == tsr.tokens.Size()-1 {
|
||||
// Scan any remaining operations after last token
|
||||
// should be included (they will be inserts).
|
||||
for _, op := range indexToOp{
|
||||
if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
|
||||
for _, op := range indexToOp {
|
||||
if op.GetIndex() >= tsr.tokens.Size()-1 {
|
||||
buf.WriteString(op.GetText())
|
||||
}
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
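
GetText walks the original token stream and splices in the reduced rewrite operations, so typical use is: fill a CommonTokenStream, queue inserts/replaces/deletes against it, then render the result. A hedged usage sketch; the generated lexer and its package are hypothetical, and only the rewriter calls shown in this file are assumed:

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr/v4"

        "example.com/myproject/parser" // hypothetical package holding a generated lexer
    )

    func main() {
        input := antlr.NewInputStream("int x = 1 ;")
        lexer := parser.NewMyLexer(input) // hypothetical generated lexer
        tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
        tokens.Fill()

        rw := antlr.NewTokenStreamRewriter(tokens)
        rw.InsertBeforeDefault(0, "// edited\n") // queue an insert before token 0
        rw.ReplaceDefaultPos(2, "y")             // replace the single token at index 2
        fmt.Println(rw.GetTextDefault())         // original text with the edits applied
    }
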
// We need to combine operations and report invalid operations (like
|
||||
// overlapping replaces that are not completely nested). Inserts to
// same index need to be combined etc... Here are the cases:
|
||||
// We need to combine operations and report invalid operations (like
|
||||
// overlapping replaces that are not completely nested). Inserts to
// same index need to be combined etc... Here are the cases:
|
||||
//
|
||||
// I.i.u I.j.v leave alone, nonoverlapping
|
||||
// I.i.u I.i.v combine: Iivu
|
||||
// I.i.u I.j.v leave alone, nonoverlapping
|
||||
// I.i.u I.i.v combine: Iivu
|
||||
//
|
||||
// R.i-j.u R.x-y.v | i-j in x-y delete first R
|
||||
// R.i-j.u R.i-j.v delete first R
|
||||
// R.i-j.u R.x-y.v | x-y in i-j ERROR
|
||||
// R.i-j.u R.x-y.v | boundaries overlap ERROR
|
||||
// R.i-j.u R.x-y.v | i-j in x-y delete first R
|
||||
// R.i-j.u R.i-j.v delete first R
|
||||
// R.i-j.u R.x-y.v | x-y in i-j ERROR
|
||||
// R.i-j.u R.x-y.v | boundaries overlap ERROR
|
||||
//
|
||||
// Delete special case of replace (text==null):
|
||||
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
|
||||
// Delete special case of replace (text==null):
|
||||
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
|
||||
//
|
||||
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
|
||||
// we're not deleting i)
|
||||
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
|
||||
// R.x-y.v I.i.u | i in x-y ERROR
|
||||
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
|
||||
// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
|
||||
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
|
||||
// we're not deleting i)
|
||||
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
|
||||
// R.x-y.v I.i.u | i in x-y ERROR
|
||||
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
|
||||
// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
|
||||
//
|
||||
// I.i.u = insert u before op @ index i
|
||||
// R.x-y.u = replace x-y indexed tokens with u
|
||||
// I.i.u = insert u before op @ index i
|
||||
// R.x-y.u = replace x-y indexed tokens with u
|
||||
//
|
||||
// First we need to examine replaces. For any replace op:
|
||||
// First we need to examine replaces. For any replace op:
|
||||
//
|
||||
// 1. wipe out any insertions before op within that range.
|
||||
// 2. Drop any replace op before that is contained completely within
|
||||
// that range.
|
||||
// 3. Throw exception upon boundary overlap with any previous replace.
|
||||
// 1. wipe out any insertions before op within that range.
|
||||
// 2. Drop any replace op before that is contained completely within
|
||||
// that range.
|
||||
// 3. Throw exception upon boundary overlap with any previous replace.
|
||||
//
|
||||
// Then we can deal with inserts:
|
||||
// Then we can deal with inserts:
|
||||
//
|
||||
// 1. for any inserts to same index, combine even if not adjacent.
|
||||
// 2. for any prior replace with same left boundary, combine this
|
||||
// insert with replace and delete this replace.
|
||||
// 3. throw exception if index in same range as previous replace
|
||||
// 1. for any inserts to same index, combine even if not adjacent.
|
||||
// 2. for any prior replace with same left boundary, combine this
|
||||
// insert with replace and delete this replace.
|
||||
// 3. throw exception if index in same range as previous replace
|
||||
//
|
||||
// Don't actually delete; make op null in list. Easier to walk list.
|
||||
// Later we can throw as we add to index → op map.
|
||||
// Don't actually delete; make op null in list. Easier to walk list.
|
||||
// Later we can throw as we add to index → op map.
|
||||
//
|
||||
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
|
||||
// inserted stuff would be before the replace range. But, if you
|
||||
// add tokens in front of a method body '{' and then delete the method
|
||||
// body, I think the stuff before the '{' you added should disappear too.
|
||||
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
|
||||
// inserted stuff would be before the replace range. But, if you
|
||||
// add tokens in front of a method body '{' and then delete the method
|
||||
// body, I think the stuff before the '{' you added should disappear too.
|
||||
//
|
||||
// Return a map from token index to operation.
|
||||
//
|
||||
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
|
||||
// Return a map from token index to operation.
|
||||
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
|
||||
// WALK REPLACES
|
||||
for i:=0; i < len(rewrites); i++{
|
||||
for i := 0; i < len(rewrites); i++ {
|
||||
op := rewrites[i]
|
||||
if op == nil{continue}
|
||||
if op == nil {
|
||||
continue
|
||||
}
|
||||
rop, ok := op.(*ReplaceOp)
|
||||
if !ok{continue}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// Wipe prior inserts within range
|
||||
for j:=0; j<i && j < len(rewrites); j++{
|
||||
if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
|
||||
if iop.index == rop.index{
|
||||
for j := 0; j < i && j < len(rewrites); j++ {
|
||||
if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
|
||||
if iop.index == rop.index {
|
||||
// E.g., insert before 2, delete 2..2; update replace
|
||||
// text to include insert before, kill insert
|
||||
rewrites[iop.instruction_index] = nil
|
||||
if rop.text != ""{
|
||||
if rop.text != "" {
|
||||
rop.text = iop.text + rop.text
|
||||
}else{
|
||||
} else {
|
||||
rop.text = iop.text
|
||||
}
|
||||
}else if iop.index > rop.index && iop.index <=rop.LastIndex{
|
||||
} else if iop.index > rop.index && iop.index <= rop.LastIndex {
|
||||
// delete insert as it's a no-op.
|
||||
rewrites[iop.instruction_index] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Drop any prior replaces contained within
|
||||
for j:=0; j<i && j < len(rewrites); j++{
|
||||
if prevop, ok := rewrites[j].(*ReplaceOp);ok{
|
||||
if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
|
||||
for j := 0; j < i && j < len(rewrites); j++ {
|
||||
if prevop, ok := rewrites[j].(*ReplaceOp); ok {
|
||||
if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
|
||||
// delete replace as it's a no-op.
|
||||
rewrites[prevop.instruction_index] = nil
|
||||
continue
|
||||
@@ -566,61 +571,67 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
|
||||
disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
|
||||
// Delete special case of replace (text==null):
|
||||
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
|
||||
if prevop.text == "" && rop.text == "" && !disjoint{
|
||||
if prevop.text == "" && rop.text == "" && !disjoint {
|
||||
rewrites[prevop.instruction_index] = nil
|
||||
rop.index = min(prevop.index, rop.index)
|
||||
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
|
||||
println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
|
||||
}else if !disjoint{
|
||||
} else if !disjoint {
|
||||
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// WALK INSERTS
|
||||
for i:=0; i < len(rewrites); i++ {
|
||||
for i := 0; i < len(rewrites); i++ {
|
||||
op := rewrites[i]
|
||||
if op == nil{continue}
|
||||
if op == nil {
|
||||
continue
|
||||
}
|
||||
//hack to replicate inheritance in composition
|
||||
_, iok := rewrites[i].(*InsertBeforeOp)
|
||||
_, aok := rewrites[i].(*InsertAfterOp)
|
||||
if !iok && !aok{continue}
|
||||
if !iok && !aok {
|
||||
continue
|
||||
}
|
||||
iop := rewrites[i]
|
||||
// combine current insert with prior if any at same index
|
||||
// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
|
||||
for j:=0; j<i && j < len(rewrites); j++{
|
||||
if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
|
||||
if nextIop.index == iop.GetIndex(){
|
||||
for j := 0; j < i && j < len(rewrites); j++ {
|
||||
if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
|
||||
if nextIop.index == iop.GetIndex() {
|
||||
iop.SetText(nextIop.text + iop.GetText())
|
||||
rewrites[j] = nil
|
||||
}
|
||||
}
|
||||
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
|
||||
if prevIop.index == iop.GetIndex(){
|
||||
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
|
||||
if prevIop.index == iop.GetIndex() {
|
||||
iop.SetText(iop.GetText() + prevIop.text)
|
||||
rewrites[prevIop.instruction_index] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// look for replaces where iop.index is in range; error
|
||||
for j:=0; j<i && j < len(rewrites); j++{
|
||||
if rop,ok := rewrites[j].(*ReplaceOp); ok{
|
||||
if iop.GetIndex() == rop.index{
|
||||
for j := 0; j < i && j < len(rewrites); j++ {
|
||||
if rop, ok := rewrites[j].(*ReplaceOp); ok {
|
||||
if iop.GetIndex() == rop.index {
|
||||
rop.text = iop.GetText() + rop.text
|
||||
rewrites[i] = nil
|
||||
continue
|
||||
}
|
||||
if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
|
||||
panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
|
||||
if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
|
||||
panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
m := map[int]RewriteOperation{}
|
||||
for i:=0; i < len(rewrites); i++{
|
||||
for i := 0; i < len(rewrites); i++ {
|
||||
op := rewrites[i]
|
||||
if op == nil {continue}
|
||||
if _, ok := m[op.GetIndex()]; ok{
|
||||
if op == nil {
|
||||
continue
|
||||
}
|
||||
if _, ok := m[op.GetIndex()]; ok {
|
||||
panic("should only be one op per index")
|
||||
}
|
||||
m[op.GetIndex()] = op
|
||||
@@ -628,22 +639,21 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
|
||||
return m
|
||||
}
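
One consequence of the rules above: an insert-before that lands on the left boundary of an earlier replace is folded into that replace, with the inserted text placed ahead of the replacement text (rop.text = iop.GetText() + rop.text). A sketch of that rule through the public rewriter API; it assumes a filled token stream with at least five tokens, since Replace panics on an out-of-range interval:

    package example

    import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

    // combineExample demonstrates "R.x-y.v I.x.u => R.x-y.uv" from the rules above.
    func combineExample(tokens antlr.TokenStream) string {
        rw := antlr.NewTokenStreamRewriter(tokens)
        rw.ReplaceDefault(2, 4, "V")   // R.2-4.V
        rw.InsertBeforeDefault(2, "U") // I.2.U shares the replace's left boundary
        // reduceToSingleOperationPerIndex folds the insert into the replace,
        // so tokens 2..4 render as "UV" in the returned text.
        return rw.GetTextDefault()
    }
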
/*
|
||||
Quick fixing Go lack of overloads
|
||||
*/
|
||||
*/
|
||||
|
||||
func max(a,b int)int{
|
||||
if a>b{
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}else {
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
func min(a,b int)int{
|
||||
if a<b{
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}else {
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -234,10 +234,8 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
|
||||
// then by triggering the event specific to the given parse tree node
|
||||
//
|
||||
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
|
||||
ctx := r.GetRuleContext().(ParserRuleContext)
|
||||
listener.EnterEveryRule(ctx)
|
||||
@@ -246,7 +244,6 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
|
||||
|
||||
// Exits a grammar rule by first triggering the event specific to the given parse tree node
|
||||
// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
|
||||
//
|
||||
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
|
||||
ctx := r.GetRuleContext().(ParserRuleContext)
|
||||
ctx.ExitRule(listener)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -9,8 +9,9 @@ import "fmt"
|
||||
/** A set of utility routines useful for all kinds of ANTLR trees. */
|
||||
|
||||
// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
|
||||
// node payloads to get the text for the nodes. Detect
|
||||
// parse trees and extract data appropriately.
|
||||
//
|
||||
// node payloads to get the text for the nodes. Detect
|
||||
// parse trees and extract data appropriately.
|
||||
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
|
||||
|
||||
if recog != nil {
|
||||
@@ -80,8 +81,8 @@ func TreesGetChildren(t Tree) []Tree {
|
||||
}
|
||||
|
||||
// Return a list of all ancestors of this node. The first node of
|
||||
// list is the root and the last is the parent of this node.
|
||||
//
|
||||
// list is the root and the last is the parent of this node.
|
||||
func TreesgetAncestors(t Tree) []Tree {
|
||||
ancestors := make([]Tree, 0)
|
||||
t = t.GetParent()
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -47,28 +47,25 @@ func (s *IntStack) Push(e int) {
|
||||
*s = append(*s, e)
|
||||
}
|
||||
|
||||
func standardEqualsFunction(a interface{}, b interface{}) bool {
|
||||
type comparable interface {
|
||||
Equals(other Collectable[any]) bool
|
||||
}
|
||||
|
||||
ac, oka := a.(comparable)
|
||||
bc, okb := b.(comparable)
|
||||
func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
|
||||
|
||||
if !oka || !okb {
|
||||
panic("Not Comparable")
|
||||
}
|
||||
|
||||
return ac.equals(bc)
|
||||
return a.Equals(b)
|
||||
}
|
||||
|
||||
func standardHashFunction(a interface{}) int {
|
||||
if h, ok := a.(hasher); ok {
|
||||
return h.hash()
|
||||
return h.Hash()
|
||||
}
|
||||
|
||||
panic("Not Hasher")
|
||||
}
|
||||
|
||||
type hasher interface {
|
||||
hash() int
|
||||
Hash() int
|
||||
}
|
||||
|
||||
const bitsPerWord = 64
|
||||
@@ -171,7 +168,7 @@ func (b *BitSet) equals(other interface{}) bool {
|
||||
|
||||
// We only compare set bits, so we cannot rely on the two slices having the same size. Its
|
||||
// possible for two BitSets to have different slice lengths but the same set bits. So we only
|
||||
// compare the relavent words and ignore the trailing zeros.
|
||||
// compare the relevant words and ignore the trailing zeros.
|
||||
bLen := b.minLen()
|
||||
otherLen := otherBitSet.minLen()
|
||||
|
||||
@@ -8,8 +8,6 @@ const (
|
||||
_loadFactor = 0.75
|
||||
)
|
||||
|
||||
var _ Set = (*array2DHashSet)(nil)
|
||||
|
||||
type Set interface {
|
||||
Add(value interface{}) (added interface{})
|
||||
Len() int
|
||||
@@ -20,9 +18,9 @@ type Set interface {
|
||||
}
|
||||
|
||||
type array2DHashSet struct {
|
||||
buckets [][]interface{}
|
||||
buckets [][]Collectable[any]
|
||||
hashcodeFunction func(interface{}) int
|
||||
equalsFunction func(interface{}, interface{}) bool
|
||||
equalsFunction func(Collectable[any], Collectable[any]) bool
|
||||
|
||||
n int // How many elements in set
|
||||
threshold int // when to expand
|
||||
@@ -61,11 +59,11 @@ func (as *array2DHashSet) Values() []interface{} {
|
||||
return values
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) Contains(value interface{}) bool {
|
||||
func (as *array2DHashSet) Contains(value Collectable[any]) bool {
|
||||
return as.Get(value) != nil
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) Add(value interface{}) interface{} {
|
||||
func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
|
||||
if as.n > as.threshold {
|
||||
as.expand()
|
||||
}
|
||||
@@ -98,7 +96,7 @@ func (as *array2DHashSet) expand() {
|
||||
|
||||
b := as.getBuckets(o)
|
||||
bucketLength := newBucketLengths[b]
|
||||
var newBucket []interface{}
|
||||
var newBucket []Collectable[any]
|
||||
if bucketLength == 0 {
|
||||
// new bucket
|
||||
newBucket = as.createBucket(as.initialBucketCapacity)
|
||||
@@ -107,7 +105,7 @@ func (as *array2DHashSet) expand() {
|
||||
newBucket = newTable[b]
|
||||
if bucketLength == len(newBucket) {
|
||||
// expand
|
||||
newBucketCopy := make([]interface{}, len(newBucket)<<1)
|
||||
newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
|
||||
copy(newBucketCopy[:bucketLength], newBucket)
|
||||
newBucket = newBucketCopy
|
||||
newTable[b] = newBucket
|
||||
@@ -124,7 +122,7 @@ func (as *array2DHashSet) Len() int {
|
||||
return as.n
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) Get(o interface{}) interface{} {
|
||||
func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
|
||||
if o == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -147,7 +145,7 @@ func (as *array2DHashSet) Get(o interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
|
||||
func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
|
||||
b := as.getBuckets(o)
|
||||
|
||||
bucket := as.buckets[b]
|
||||
@@ -178,7 +176,7 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
|
||||
|
||||
// full bucket, expand and add to end
|
||||
oldLength := len(bucket)
|
||||
bucketCopy := make([]interface{}, oldLength<<1)
|
||||
bucketCopy := make([]Collectable[any], oldLength<<1)
|
||||
copy(bucketCopy[:oldLength], bucket)
|
||||
bucket = bucketCopy
|
||||
as.buckets[b] = bucket
|
||||
@@ -187,22 +185,22 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
|
||||
return o
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) getBuckets(value interface{}) int {
|
||||
func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
|
||||
hash := as.hashcodeFunction(value)
|
||||
return hash & (len(as.buckets) - 1)
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
|
||||
return make([][]interface{}, cap)
|
||||
func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
|
||||
return make([][]Collectable[any], cap)
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) createBucket(cap int) []interface{} {
|
||||
return make([]interface{}, cap)
|
||||
func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
|
||||
return make([]Collectable[any], cap)
|
||||
}
|
||||
|
||||
func newArray2DHashSetWithCap(
|
||||
hashcodeFunction func(interface{}) int,
|
||||
equalsFunction func(interface{}, interface{}) bool,
|
||||
equalsFunction func(Collectable[any], Collectable[any]) bool,
|
||||
initCap int,
|
||||
initBucketCap int,
|
||||
) *array2DHashSet {
|
||||
@@ -231,7 +229,7 @@ func newArray2DHashSetWithCap(
|
||||
|
||||
func newArray2DHashSet(
|
||||
hashcodeFunction func(interface{}) int,
|
||||
equalsFunction func(interface{}, interface{}) bool,
|
||||
equalsFunction func(Collectable[any], Collectable[any]) bool,
|
||||
) *array2DHashSet {
|
||||
return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
|
||||
}
|
||||
10
vendor/github.com/cenkalti/backoff/v4/.travis.yml
generated
vendored
@@ -1,10 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.13
|
||||
- 1.x
|
||||
- tip
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci
|
||||
50
vendor/github.com/cenkalti/backoff/v4/retry.go
generated
vendored
@@ -5,10 +5,20 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type OperationWithData[T any] func() (T, error)
|
||||
|
||||
// An Operation is executing by Retry() or RetryNotify().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type Operation func() error
|
||||
|
||||
func (o Operation) withEmptyData() OperationWithData[struct{}] {
|
||||
return func() (struct{}, error) {
|
||||
return struct{}{}, o()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is a notify-on-error function. It receives an operation error and
|
||||
// backoff delay if the operation failed (with an error).
|
||||
//
|
||||
@@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error {
|
||||
return RetryNotify(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryWithData is like Retry but returns data in the response too.
|
||||
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
|
||||
return RetryNotifyWithData(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryNotify calls notify function with the error and wait duration
|
||||
// for each failed attempt before sleep.
|
||||
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||
return RetryNotifyWithTimer(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
|
||||
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
|
||||
// for each failed attempt before sleep.
|
||||
// A default timer that uses system timer is used when nil is passed.
|
||||
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
|
||||
var err error
|
||||
var next time.Duration
|
||||
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
|
||||
return err
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
|
||||
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, t)
|
||||
}
|
||||
|
||||
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
var (
|
||||
err error
|
||||
next time.Duration
|
||||
res T
|
||||
)
|
||||
if t == nil {
|
||||
t = &defaultTimer{}
|
||||
}
|
||||
@@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer
|
||||
|
||||
b.Reset()
|
||||
for {
|
||||
if err = operation(); err == nil {
|
||||
return nil
|
||||
res, err = operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return permanent.Err
|
||||
return res, permanent.Err
|
||||
}
|
||||
|
||||
if next = b.NextBackOff(); next == Stop {
|
||||
if cerr := ctx.Err(); cerr != nil {
|
||||
return cerr
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
return err
|
||||
return res, err
|
||||
}
|
||||
|
||||
if notify != nil {
|
||||
@@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
return res, ctx.Err()
|
||||
case <-t.C():
|
||||
}
|
||||
}
|
||||
|
||||
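The hunk above introduces a generics-based retry API (OperationWithData, RetryWithData, RetryNotifyWithData, RetryNotifyWithTimerAndData) so an operation can hand a result back through the retry loop instead of smuggling it out via a closure. A minimal usage sketch, using only the v4 API visible in this diff plus NewExponentialBackOff; the URL is a placeholder:

package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// The operation returns a value and an error; it is retried on error.
	fetchStatus := func() (int, error) {
		resp, err := http.Get("https://example.invalid/healthz") // placeholder URL
		if err != nil {
			return 0, err
		}
		defer resp.Body.Close()
		return resp.StatusCode, nil
	}

	// RetryWithData hands the operation's result back to the caller once the
	// backoff policy succeeds or gives up.
	code, err := backoff.RetryWithData(fetchStatus, backoff.NewExponentialBackOff())
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("status:", code)
}

Retry and RetryNotify keep their old signatures; per the diff they now wrap the operation with withEmptyData and delegate to doRetryNotify.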
31
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@@ -3,8 +3,7 @@
|
||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
|
||||
func (*Digest) Sum64() uint64
|
||||
```
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
The package is written with optimized pure Go and also contains even faster
|
||||
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||
opts into using the Go code even on those architectures.
|
||||
|
||||
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||
|
||||
## Compatibility
|
||||
|
||||
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| --- | --- | --- |
|
||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||||
| input size | purego | asm |
|
||||
| ---------- | --------- | --------- |
|
||||
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||||
the following commands under Go 1.11.2:
|
||||
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||
CPU using the following commands under Go 1.19.2:
|
||||
|
||||
```
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
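For reference, a short sketch of the Digest/Sum64 API listed in the README above; the input strings are arbitrary. Building or testing with the purego tag (for example, go test -tags purego) selects the pure-Go path that the updated text mentions for amd64 and arm64.

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, xxhash")))

	// Streaming: a Digest accumulates input across several writes and yields
	// the same value as the one-shot call for the same bytes.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("xxhash")
	fmt.Printf("%016x\n", d.Sum64())
}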
10
vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
|
||||
# Small convenience script for running the tests with various combinations of
|
||||
# arch/tags. This assumes we're running on amd64 and have qemu available.
|
||||
|
||||
go test ./...
|
||||
go test -tags purego ./...
|
||||
GOARCH=arm64 go test
|
||||
GOARCH=arm64 go test -tags purego
|
||||
47
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@@ -16,19 +16,11 @@ const (
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
@@ -50,10 +42,10 @@ func New() *Digest {
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
||||
308
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@@ -1,215 +1,209 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
blockLoop()
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
ADDQ n, h
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
||||
183
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
22
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@@ -1,4 +1,5 @@
|
||||
// +build !amd64 appengine !gc purego
|
||||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||
// +build !amd64,!arm64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v1 := primes[0] + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
v4 := -primes[0]
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
|
||||
1
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
3
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
@@ -11,7 +12,7 @@ import (
|
||||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
|
||||
2
vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
@@ -85,7 +85,7 @@ func (v *Version) Set(version string) error {
|
||||
return fmt.Errorf("failed to validate metadata: %v", err)
|
||||
}
|
||||
|
||||
parsed := make([]int64, 3, 3)
|
||||
parsed := make([]int64, 3)
|
||||
|
||||
for i, v := range dotParts[:3] {
|
||||
val, err := strconv.ParseInt(v, 10, 64)
|
||||
|
||||
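The Set method patched here parses a version string into the Version struct. A minimal sketch of that call, assuming only the exported Version type and its Major/Minor/Patch/PreRelease fields; the version literal is arbitrary:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	var v semver.Version
	// Set parses the dotted core version and any pre-release suffix.
	if err := v.Set("1.28.0-beta.0"); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(v.Major, v.Minor, v.Patch, v.PreRelease)
}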
52
vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
generated
vendored
@@ -69,6 +69,58 @@ func Enabled() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// StderrIsJournalStream returns whether the process stderr is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stderr's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stderr)
|
||||
}
|
||||
|
||||
// StdoutIsJournalStream returns whether the process stdout is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stdout's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// Most users should probably use [StderrIsJournalStream].
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stdout)
|
||||
}
|
||||
|
||||
func fdIsJournalStream(fd int) (bool, error) {
|
||||
journalStream := os.Getenv("JOURNAL_STREAM")
|
||||
if journalStream == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var expectedStat syscall.Stat_t
|
||||
_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
err = syscall.Fstat(fd, &stat)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
|
||||
return match, nil
|
||||
}
|
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
|
||||
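A sketch of the automatic protocol upgrading that the new StderrIsJournalStream doc comment describes, using the Send signature and Priority constants from this package; the journald field name is hypothetical:

package main

import (
	"log"
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	// Upgrade to the journal's native protocol only when stderr is really
	// connected to a journald stream.
	ok, err := journal.StderrIsJournalStream()
	if err != nil {
		log.Fatal(err)
	}
	if ok {
		// DESCHEDULER_PHASE is a hypothetical journald field name.
		journal.Send("starting up", journal.PriInfo, map[string]string{
			"DESCHEDULER_PHASE": "init",
		})
		return
	}
	os.Stderr.WriteString("no journald stream detected; keeping plain stderr logging\n")
}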
8
vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
generated
vendored
@@ -33,3 +33,11 @@ func Enabled() bool {
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
return errors.New("could not initialize socket to journald")
|
||||
}
|
||||
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
42
vendor/github.com/evanphx/json-patch/patch.go
generated
vendored
@@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error {
|
||||
return errors.Wrapf(err, "replace operation failed to decode path")
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
val := op.value()
|
||||
|
||||
if val.which == eRaw {
|
||||
if !val.tryDoc() {
|
||||
if !val.tryAry() {
|
||||
return errors.Wrapf(err, "replace operation value must be object or array")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch val.which {
|
||||
case eAry:
|
||||
*doc = &val.ary
|
||||
case eDoc:
|
||||
*doc = &val.doc
|
||||
case eRaw:
|
||||
return errors.Wrapf(err, "replace operation hit impossible case")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
@@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error {
|
||||
return errors.Wrapf(err, "test operation failed to decode path")
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
var self lazyNode
|
||||
|
||||
switch sv := (*doc).(type) {
|
||||
case *partialDoc:
|
||||
self.doc = *sv
|
||||
self.which = eDoc
|
||||
case *partialArray:
|
||||
self.ary = *sv
|
||||
self.which = eAry
|
||||
}
|
||||
|
||||
if self.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
|
||||
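The blocks removed above dropped the special case in which an empty path ("") replaces or tests the whole document; ordinary member paths are unaffected. A minimal sketch of the usual decode-and-apply flow, assuming DecodePatch and Patch.Apply from this package; the document and patch are arbitrary:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"spec":{"replicas":1}}`)
	patchJSON := []byte(`[
		{"op": "replace", "path": "/spec/replicas", "value": 3},
		{"op": "test", "path": "/spec/replicas", "value": 3}
	]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"spec":{"replicas":3}}
}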
3
vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
@@ -6,7 +6,6 @@ linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- deadcode
|
||||
- errcheck
|
||||
- forcetypeassert
|
||||
- gocritic
|
||||
@@ -18,10 +17,8 @@ linters:
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unused
|
||||
- varcheck
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
|
||||
32
vendor/github.com/go-logr/logr/discard.go
generated
vendored
@@ -20,35 +20,5 @@ package logr
|
||||
// used whenever the caller is not interested in the logs. Logger instances
|
||||
// produced by this function always compare as equal.
|
||||
func Discard() Logger {
|
||||
return Logger{
|
||||
level: 0,
|
||||
sink: discardLogSink{},
|
||||
}
|
||||
}
|
||||
|
||||
// discardLogSink is a LogSink that discards all messages.
|
||||
type discardLogSink struct{}
|
||||
|
||||
// Verify that it actually implements the interface
|
||||
var _ LogSink = discardLogSink{}
|
||||
|
||||
func (l discardLogSink) Init(RuntimeInfo) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Enabled(int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (l discardLogSink) Info(int, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Error(error, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithValues(...interface{}) LogSink {
|
||||
return l
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithName(string) LogSink {
|
||||
return l
|
||||
return New(nil)
|
||||
}
|
||||
|
||||
27
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
@@ -21,13 +21,13 @@ limitations under the License.
|
||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||
// "write" function. See New and NewJSON for details.
|
||||
//
|
||||
// Custom LogSinks
|
||||
// # Custom LogSinks
|
||||
//
|
||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||
// your own custom LogSink implementation. This is useful when the LogSink
|
||||
// needs to implement additional methods, for example.
|
||||
//
|
||||
// Formatting
|
||||
// # Formatting
|
||||
//
|
||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||
@@ -37,6 +37,7 @@ package funcr
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
prefix: "",
|
||||
values: nil,
|
||||
depth: 0,
|
||||
opts: opts,
|
||||
opts: &opts,
|
||||
}
|
||||
return f
|
||||
}
|
||||
@@ -231,7 +232,7 @@ type Formatter struct {
|
||||
values []interface{}
|
||||
valuesStr string
|
||||
depth int
|
||||
opts Options
|
||||
opts *Options
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
printComma := false // testing i>0 is not enough because of JSON omitted fields
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fld := t.Field(i)
|
||||
if fld.PkgPath != "" {
|
||||
@@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
|
||||
if omitempty && isEmpty(v.Field(i)) {
|
||||
continue
|
||||
}
|
||||
if i > 0 {
|
||||
if printComma {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
printComma = true // if we got here, we are rendering a field
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
|
||||
continue
|
||||
@@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
|
||||
}
|
||||
return buf.String()
|
||||
case reflect.Slice, reflect.Array:
|
||||
// If this is outputing as JSON make sure this isn't really a json.RawMessage.
|
||||
// If so just emit "as-is" and don't pretty it as that will just print
|
||||
// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
|
||||
if f.outputFormat == outputJSON {
|
||||
if rm, ok := value.(json.RawMessage); ok {
|
||||
// If it's empty make sure we emit an empty value as the array style would below.
|
||||
if len(rm) > 0 {
|
||||
buf.Write(rm)
|
||||
} else {
|
||||
buf.WriteString("null")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
}
|
||||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
|
||||
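The new slice/array case above makes JSON output emit a json.RawMessage verbatim instead of rendering it as an array of byte values. A hedged sketch of the effect, assuming funcr.NewJSON and funcr.Options as exported by this package:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// NewJSON builds a logr.Logger that emits one JSON object per line.
	logger := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

	// With the change above, the raw message is embedded as-is in the output
	// rather than as a numeric byte array.
	raw := json.RawMessage(`{"nested":"document"}`)
	logger.Info("config loaded", "payload", raw)
}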
166
vendor/github.com/go-logr/logr/logr.go
generated
vendored
@@ -21,7 +21,7 @@ limitations under the License.
|
||||
// to back that API. Packages in the Go ecosystem can depend on this package,
|
||||
// while callers can implement logging with whatever backend is appropriate.
|
||||
//
|
||||
// Usage
|
||||
// # Usage
|
||||
//
|
||||
// Logging is done using a Logger instance. Logger is a concrete type with
|
||||
// methods, which defers the actual logging to a LogSink interface. The main
|
||||
@@ -30,16 +30,20 @@ limitations under the License.
|
||||
// "structured logging".
|
||||
//
|
||||
// With Go's standard log package, we might write:
|
||||
// log.Printf("setting target value %s", targetValue)
|
||||
//
|
||||
// log.Printf("setting target value %s", targetValue)
|
||||
//
|
||||
// With logr's structured logging, we'd write:
|
||||
// logger.Info("setting target", "value", targetValue)
|
||||
//
|
||||
// logger.Info("setting target", "value", targetValue)
|
||||
//
|
||||
// Errors are much the same. Instead of:
|
||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
||||
//
|
||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
||||
//
|
||||
// We'd write:
|
||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
||||
//
|
||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
||||
//
|
||||
// Info() and Error() are very similar, but they are separate methods so that
|
||||
// LogSink implementations can choose to do things like attach additional
|
||||
@@ -47,7 +51,7 @@ limitations under the License.
|
||||
// always logged, regardless of the current verbosity. If there is no error
|
||||
// instance available, passing nil is valid.
|
||||
//
|
||||
// Verbosity
|
||||
// # Verbosity
|
||||
//
|
||||
// Often we want to log information only when the application in "verbose
|
||||
// mode". To write log lines that are more verbose, Logger has a V() method.
|
||||
@@ -58,20 +62,22 @@ limitations under the License.
|
||||
// Error messages do not have a verbosity level and are always logged.
|
||||
//
|
||||
// Where we might have written:
|
||||
// if flVerbose >= 2 {
|
||||
// log.Printf("an unusual thing happened")
|
||||
// }
|
||||
//
|
||||
// if flVerbose >= 2 {
|
||||
// log.Printf("an unusual thing happened")
|
||||
// }
|
||||
//
|
||||
// We can write:
|
||||
// logger.V(2).Info("an unusual thing happened")
|
||||
//
|
||||
// Logger Names
|
||||
// logger.V(2).Info("an unusual thing happened")
|
||||
//
|
||||
// # Logger Names
|
||||
//
|
||||
// Logger instances can have name strings so that all messages logged through
|
||||
// that instance have additional context. For example, you might want to add
|
||||
// a subsystem name:
|
||||
//
|
||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
||||
//
|
||||
// The WithName() method returns a new Logger, which can be passed to
|
||||
// constructors or other functions for further use. Repeated use of WithName()
|
||||
@@ -82,25 +88,27 @@ limitations under the License.
|
||||
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
|
||||
// quotes, etc).
|
||||
//
|
||||
// Saved Values
|
||||
// # Saved Values
|
||||
//
|
||||
// Logger instances can store any number of key/value pairs, which will be
|
||||
// logged alongside all messages logged through that instance. For example,
|
||||
// you might want to create a Logger instance per managed object:
|
||||
//
|
||||
// With the standard log package, we might write:
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// With logr we'd write:
|
||||
// // Elsewhere: set up the logger to log the object name.
|
||||
// obj.logger = mainLogger.WithValues(
|
||||
// "name", obj.name, "namespace", obj.namespace)
|
||||
//
|
||||
// // later on...
|
||||
// obj.logger.Info("setting foo", "value", targetValue)
|
||||
// // Elsewhere: set up the logger to log the object name.
|
||||
// obj.logger = mainLogger.WithValues(
|
||||
// "name", obj.name, "namespace", obj.namespace)
|
||||
//
|
||||
// Best Practices
|
||||
// // later on...
|
||||
// obj.logger.Info("setting foo", "value", targetValue)
|
||||
//
|
||||
// # Best Practices
|
||||
//
|
||||
// Logger has very few hard rules, with the goal that LogSink implementations
|
||||
// might have a lot of freedom to differentiate. There are, however, some
|
||||
@@ -124,15 +132,15 @@ limitations under the License.
|
||||
// around. For cases where passing a logger is optional, a pointer to Logger
|
||||
// should be used.
|
||||
//
|
||||
// Key Naming Conventions
|
||||
// # Key Naming Conventions
|
||||
//
|
||||
// Keys are not strictly required to conform to any specification or regex, but
|
||||
// it is recommended that they:
|
||||
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// * be constant (not dependent on input data)
|
||||
// * contain only printable characters
|
||||
// * not contain whitespace or punctuation
|
||||
// * use lower case for simple keys and lowerCamelCase for more complex ones
|
||||
// - be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// - be constant (not dependent on input data)
|
||||
// - contain only printable characters
|
||||
// - not contain whitespace or punctuation
|
||||
// - use lower case for simple keys and lowerCamelCase for more complex ones
|
||||
//
|
||||
// These guidelines help ensure that log data is processed properly regardless
|
||||
// of the log implementation. For example, log implementations will try to
|
||||
@@ -141,51 +149,54 @@ limitations under the License.
|
||||
// While users are generally free to use key names of their choice, it's
|
||||
// generally best to avoid using the following keys, as they're frequently used
|
||||
// by implementations:
|
||||
// * "caller": the calling information (file/line) of a particular log line
|
||||
// * "error": the underlying error value in the `Error` method
|
||||
// * "level": the log level
|
||||
// * "logger": the name of the associated logger
|
||||
// * "msg": the log message
|
||||
// * "stacktrace": the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message)
|
||||
// * "ts": the timestamp for a log line
|
||||
// - "caller": the calling information (file/line) of a particular log line
|
||||
// - "error": the underlying error value in the `Error` method
|
||||
// - "level": the log level
|
||||
// - "logger": the name of the associated logger
|
||||
// - "msg": the log message
|
||||
// - "stacktrace": the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message)
|
||||
// - "ts": the timestamp for a log line
|
||||
//
|
||||
// Implementations are encouraged to make use of these keys to represent the
|
||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
||||
// would be necessary to represent at least message and timestamp as ordinary
|
||||
// named values).
|
||||
//
|
||||
// Break Glass
|
||||
// # Break Glass
|
||||
//
|
||||
// Implementations may choose to give callers access to the underlying
|
||||
// logging implementation. The recommended pattern for this is:
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
//
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
//
|
||||
// Logger grants access to the sink to enable type assertions like this:
|
||||
// func DoSomethingWithImpl(log logr.Logger) {
|
||||
// if underlier, ok := log.GetSink()(impl.Underlier) {
|
||||
// implLogger := underlier.GetUnderlying()
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func DoSomethingWithImpl(log logr.Logger) {
|
||||
// if underlier, ok := log.GetSink().(impl.Underlier); ok {
|
||||
// implLogger := underlier.GetUnderlying()
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Custom `With*` functions can be implemented by copying the complete
|
||||
// Logger struct and replacing the sink in the copy:
|
||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
||||
// // new logger with that modified sink. It does nothing for loggers where
|
||||
// // the sink doesn't support that parameter.
|
||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
||||
// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
|
||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
||||
// }
|
||||
// return log
|
||||
// }
|
||||
//
|
||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
||||
// // new logger with that modified sink. It does nothing for loggers where
|
||||
// // the sink doesn't support that parameter.
|
||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
||||
// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
|
||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
||||
// }
|
||||
// return log
|
||||
// }
|
||||
//
|
||||
// Don't use New to construct a new Logger with a LogSink retrieved from an
|
||||
// existing Logger. Source code attribution might not work correctly and
|
||||
@@ -201,11 +212,14 @@ import (
|
||||
)
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users.
|
||||
// implementing LogSink, rather than end users. Passing a nil sink will create
|
||||
// a Logger which discards all log lines.
|
||||
func New(sink LogSink) Logger {
|
||||
logger := Logger{}
|
||||
logger.setSink(sink)
|
||||
sink.Init(runtimeInfo)
|
||||
if sink != nil {
|
||||
sink.Init(runtimeInfo)
|
||||
}
|
||||
return logger
|
||||
}
|
||||
|
||||
@@ -244,7 +258,7 @@ type Logger struct {
|
||||
// Enabled tests whether this Logger is enabled. For example, commandline
|
||||
// flags might be used to set the logging verbosity and disable some info logs.
|
||||
func (l Logger) Enabled() bool {
|
||||
return l.sink.Enabled(l.level)
|
||||
return l.sink != nil && l.sink.Enabled(l.level)
|
||||
}
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
@@ -254,6 +268,9 @@ func (l Logger) Enabled() bool {
|
||||
// information. The key/value pairs must alternate string keys and arbitrary
|
||||
// values.
|
||||
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
|
||||
if l.sink == nil {
|
||||
return
|
||||
}
|
||||
if l.Enabled() {
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
@@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
|
||||
// triggered this log line, if present. The err parameter is optional
|
||||
// and nil may be passed instead of an error instance.
|
||||
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
if l.sink == nil {
|
||||
return
|
||||
}
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
@@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
// level means a log message is less important. Negative V-levels are treated
|
||||
// as 0.
|
||||
func (l Logger) V(level int) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if level < 0 {
|
||||
level = 0
|
||||
}
|
||||
@@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger {
|
||||
// WithValues returns a new Logger instance with additional key/value pairs.
|
||||
// See Info for documentation on how key/value pairs work.
|
||||
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
l.setSink(l.sink.WithValues(keysAndValues...))
|
||||
return l
|
||||
}
|
||||
@@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
|
||||
// contain only letters, digits, and hyphens (see the package documentation for
|
||||
// more information).
|
||||
func (l Logger) WithName(name string) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
l.setSink(l.sink.WithName(name))
|
||||
return l
|
||||
}
|
||||
@@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger {
|
||||
// WithCallDepth(1) because it works with implementions that support the
|
||||
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
|
||||
func (l Logger) WithCallDepth(depth int) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(depth))
|
||||
}
|
||||
@@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
|
||||
// implementation does not support either of these, the original Logger will be
|
||||
// returned.
|
||||
func (l Logger) WithCallStackHelper() (func(), Logger) {
|
||||
if l.sink == nil {
|
||||
return func() {}, l
|
||||
}
|
||||
var helper func()
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(1))
|
||||
@@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
|
||||
return helper, l
|
||||
}
|
||||
|
||||
// IsZero returns true if this logger is an uninitialized zero value
|
||||
func (l Logger) IsZero() bool {
|
||||
return l.sink == nil
|
||||
}
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context.
|
||||
type contextKey struct{}
|
||||
|
||||
@@ -442,7 +482,7 @@ type LogSink interface {
|
||||
WithName(name string) LogSink
|
||||
}
|
||||
|
||||
// CallDepthLogSink represents a Logger that knows how to climb the call stack
|
||||
// CallDepthLogSink represents a LogSink that knows how to climb the call stack
|
||||
// to identify the original call site and can offset the depth by a specified
|
||||
// number of frames. This is useful for users who have helper functions
|
||||
// between the "real" call site and the actual calls to Logger methods.
|
||||
@@ -467,7 +507,7 @@ type CallDepthLogSink interface {
|
||||
WithCallDepth(depth int) LogSink
|
||||
}
|
||||
|
||||
// CallStackHelperLogSink represents a Logger that knows how to climb
|
||||
// CallStackHelperLogSink represents a LogSink that knows how to climb
|
||||
// the call stack to identify the original call site and can skip
|
||||
// intermediate helper functions if they mark themselves as
|
||||
// helper. Go's testing package uses that approach.
|
||||
|
||||
5 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go (generated, vendored)
@@ -26,11 +26,16 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||
// - FlagLowercaseHost
|
||||
// - FlagRemoveDefaultPort
|
||||
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
|
||||
//
|
||||
// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
|
||||
func NormalizeURL(u *url.URL) {
|
||||
lowercaseScheme(u)
|
||||
lowercaseHost(u)
|
||||
removeDefaultPort(u)
|
||||
removeDuplicateSlashes(u)
|
||||
|
||||
u.RawPath = ""
|
||||
u.RawFragment = ""
|
||||
}
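The doc comment lists the four purell flags this function replaces. The vendored function lives in an internal package and cannot be imported directly, so the sketch below is an illustrative stand-in that reproduces the same steps with the standard library only (the expected output is an assumption based on those steps):

    package main

    import (
        "fmt"
        "net/url"
        "regexp"
        "strings"
    )

    var dupSlashes = regexp.MustCompile(`/{2,}`)

    // normalize mirrors the four steps named above; it is not the vendored
    // internal.NormalizeURL itself.
    func normalize(u *url.URL) {
        u.Scheme = strings.ToLower(u.Scheme)
        u.Host = strings.ToLower(u.Host)
        if (u.Scheme == "http" && strings.HasSuffix(u.Host, ":80")) ||
            (u.Scheme == "https" && strings.HasSuffix(u.Host, ":443")) {
            u.Host = u.Host[:strings.LastIndex(u.Host, ":")]
        }
        u.Path = dupSlashes.ReplaceAllString(u.Path, "/")
        u.RawPath = ""
        u.RawFragment = ""
    }

    func main() {
        u, _ := url.Parse("https://Example.COM:443//a//b#frag")
        normalize(u)
        fmt.Println(u) // https://example.com/a/b#frag
    }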
|
||||
|
||||
func lowercaseScheme(u *url.URL) {
|
||||
|
||||
5 vendor/github.com/google/cel-go/cel/BUILD.bazel (generated, vendored)
@@ -23,6 +23,7 @@ go_library(
|
||||
"//checker/decls:go_default_library",
|
||||
"//common:go_default_library",
|
||||
"//common/containers:go_default_library",
|
||||
"//common/operators:go_default_library",
|
||||
"//common/overloads:go_default_library",
|
||||
"//common/types:go_default_library",
|
||||
"//common/types/pb:go_default_library",
|
||||
@@ -31,7 +32,7 @@ go_library(
|
||||
"//interpreter:go_default_library",
|
||||
"//interpreter/functions:go_default_library",
|
||||
"//parser:go_default_library",
|
||||
"@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//reflect/protodesc:go_default_library",
|
||||
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
|
||||
@@ -69,7 +70,7 @@ go_test(
|
||||
"//test/proto2pb:go_default_library",
|
||||
"//test/proto3pb:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
|
||||
"@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
|
||||
],
|
||||
|
||||
79 vendor/github.com/google/cel-go/cel/decls.go (generated, vendored)
@@ -139,7 +139,7 @@ var (
|
||||
kind: TypeKind,
|
||||
runtimeType: types.TypeType,
|
||||
}
|
||||
//UintType represents a uint type.
|
||||
// UintType represents a uint type.
|
||||
UintType = &Type{
|
||||
kind: UintKind,
|
||||
runtimeType: types.UintType,
|
||||
@@ -222,7 +222,8 @@ func (t *Type) equals(other *Type) bool {
|
||||
// - The from types are the same instance
|
||||
// - The target type is dynamic
|
||||
// - The fromType has the same kind and type name as the target type, and all parameters of the target type
|
||||
// are IsAssignableType() from the parameters of the fromType.
|
||||
//
|
||||
// are IsAssignableType() from the parameters of the fromType.
|
||||
func (t *Type) defaultIsAssignableType(fromType *Type) bool {
|
||||
if t == fromType || t.isDyn() {
|
||||
return true
|
||||
@@ -312,6 +313,11 @@ func NullableType(wrapped *Type) *Type {
|
||||
}
|
||||
}
|
||||
|
||||
// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
|
||||
func OptionalType(param *Type) *Type {
|
||||
return OpaqueType("optional", param)
|
||||
}
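OptionalType is simply OpaqueType("optional", param), which is what lets the checker treat optional(T) as an ordinary parameterized type. A hedged sketch of declaring and using such a variable (assuming the cel-go version vendored here):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        // optional(string), built on OpaqueType("optional", ...).
        optStr := cel.OptionalType(cel.StringType)

        env, err := cel.NewEnv(
            cel.OptionalTypes(), // wires in the optional library from library.go
            cel.Variable("maybeName", optStr),
        )
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`maybeName.orValue("anonymous")`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        fmt.Println(ast.OutputType()) // string
    }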
|
||||
|
||||
// OpaqueType creates an abstract parameterized type with a given name.
|
||||
func OpaqueType(name string, params ...*Type) *Type {
|
||||
return &Type{
|
||||
@@ -365,7 +371,9 @@ func Variable(name string, t *Type) EnvOption {
|
||||
//
|
||||
// - Overloads are searched in the order they are declared
|
||||
// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
|
||||
// at runtime. Empty lists and maps will result in a 'default dispatch'
|
||||
//
|
||||
// at runtime. Empty lists and maps will result in a 'default dispatch'
|
||||
//
|
||||
// - In the event that a default dispatch occurs, the first overload provided is the one invoked
|
||||
//
|
||||
// If you intend to use overloads which differentiate based on the key or element type of a list or
|
||||
@@ -405,7 +413,7 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
|
||||
// FunctionOpt defines a functional option for configuring a function declaration.
|
||||
type FunctionOpt func(*functionDecl) (*functionDecl, error)
|
||||
|
||||
// SingletonUnaryBinding creates a singleton function defintion to be used for all function overloads.
|
||||
// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
|
||||
@@ -431,7 +439,17 @@ func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
|
||||
//
|
||||
// Deprecated: use SingletonBinaryBinding
|
||||
func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
|
||||
return SingletonBinaryBinding(fn, traits...)
|
||||
}
|
||||
|
||||
// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
|
||||
func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
|
||||
trait := 0
|
||||
for _, t := range traits {
|
||||
trait = trait | t
|
||||
@@ -453,7 +471,17 @@ func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
|
||||
//
|
||||
// Deprecated: use SingletonFunctionBinding
|
||||
func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
|
||||
return SingletonFunctionBinding(fn, traits...)
|
||||
}
|
||||
|
||||
// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
|
||||
//
|
||||
// Note, this approach works well if operand is expected to have a specific trait which it implements,
|
||||
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
|
||||
func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
|
||||
trait := 0
|
||||
for _, t := range traits {
|
||||
trait = trait | t
|
||||
@@ -720,9 +748,8 @@ func (f *functionDecl) addOverload(overload *overloadDecl) error {
|
||||
// Allow redefinition of an overload implementation so long as the signatures match.
|
||||
f.overloads[index] = overload
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
|
||||
}
|
||||
return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
|
||||
}
|
||||
}
|
||||
f.overloads = append(f.overloads, overload)
|
||||
@@ -1177,3 +1204,43 @@ func collectParamNames(paramNames map[string]struct{}, arg *Type) {
|
||||
collectParamNames(paramNames, param)
|
||||
}
|
||||
}
|
||||
|
||||
func typeValueToKind(tv *types.TypeValue) (Kind, error) {
|
||||
switch tv {
|
||||
case types.BoolType:
|
||||
return BoolKind, nil
|
||||
case types.DoubleType:
|
||||
return DoubleKind, nil
|
||||
case types.IntType:
|
||||
return IntKind, nil
|
||||
case types.UintType:
|
||||
return UintKind, nil
|
||||
case types.ListType:
|
||||
return ListKind, nil
|
||||
case types.MapType:
|
||||
return MapKind, nil
|
||||
case types.StringType:
|
||||
return StringKind, nil
|
||||
case types.BytesType:
|
||||
return BytesKind, nil
|
||||
case types.DurationType:
|
||||
return DurationKind, nil
|
||||
case types.TimestampType:
|
||||
return TimestampKind, nil
|
||||
case types.NullType:
|
||||
return NullTypeKind, nil
|
||||
case types.TypeType:
|
||||
return TypeKind, nil
|
||||
default:
|
||||
switch tv.TypeName() {
|
||||
case "dyn":
|
||||
return DynKind, nil
|
||||
case "google.protobuf.Any":
|
||||
return AnyKind, nil
|
||||
case "optional":
|
||||
return OpaqueKind, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
126 vendor/github.com/google/cel-go/cel/env.go (generated, vendored)
@@ -102,15 +102,18 @@ type Env struct {
|
||||
provider ref.TypeProvider
|
||||
features map[int]bool
|
||||
appliedFeatures map[int]bool
|
||||
libraries map[string]bool
|
||||
|
||||
// Internal parser representation
|
||||
prsr *parser.Parser
|
||||
prsr *parser.Parser
|
||||
prsrOpts []parser.Option
|
||||
|
||||
// Internal checker representation
|
||||
chk *checker.Env
|
||||
chkErr error
|
||||
chkOnce sync.Once
|
||||
chkOpts []checker.Option
|
||||
chkMutex sync.Mutex
|
||||
chk *checker.Env
|
||||
chkErr error
|
||||
chkOnce sync.Once
|
||||
chkOpts []checker.Option
|
||||
|
||||
// Program options tied to the environment
|
||||
progOpts []ProgramOption
|
||||
@@ -159,6 +162,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
|
||||
provider: registry,
|
||||
features: map[int]bool{},
|
||||
appliedFeatures: map[int]bool{},
|
||||
libraries: map[string]bool{},
|
||||
progOpts: []ProgramOption{},
|
||||
}).configure(opts)
|
||||
}
|
||||
@@ -175,14 +179,14 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
|
||||
pe, _ := AstToParsedExpr(ast)
|
||||
|
||||
// Construct the internal checker env, erroring if there is an issue adding the declarations.
|
||||
err := e.initChecker()
|
||||
chk, err := e.initChecker()
|
||||
if err != nil {
|
||||
errs := common.NewErrors(ast.Source())
|
||||
errs.ReportError(common.NoLocation, e.chkErr.Error())
|
||||
errs.ReportError(common.NoLocation, err.Error())
|
||||
return nil, NewIssues(errs)
|
||||
}
|
||||
|
||||
res, errs := checker.Check(pe, ast.Source(), e.chk)
|
||||
res, errs := checker.Check(pe, ast.Source(), chk)
|
||||
if len(errs.GetErrors()) > 0 {
|
||||
return nil, NewIssues(errs)
|
||||
}
|
||||
@@ -236,10 +240,14 @@ func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
|
||||
// TypeProvider are immutable, or that their underlying implementations are based on the
|
||||
// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
|
||||
func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
if e.chkErr != nil {
|
||||
return nil, e.chkErr
|
||||
chk, chkErr := e.getCheckerOrError()
|
||||
if chkErr != nil {
|
||||
return nil, chkErr
|
||||
}
|
||||
|
||||
prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
|
||||
copy(prsrOptsCopy, e.prsrOpts)
|
||||
|
||||
// The type-checker is configured with Declarations. The declarations may either be provided
|
||||
// as options which have not yet been validated, or may come from a previous checker instance
|
||||
// whose types have already been validated.
|
||||
@@ -248,10 +256,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
|
||||
// Copy the declarations if needed.
|
||||
decsCopy := []*exprpb.Decl{}
|
||||
if e.chk != nil {
|
||||
if chk != nil {
|
||||
// If the type-checker has already been instantiated, then the e.declarations have been
|
||||
// valdiated within the chk instance.
|
||||
chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk))
|
||||
// validated within the chk instance.
|
||||
chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
|
||||
} else {
|
||||
// If the type-checker has not been instantiated, ensure the unvalidated declarations are
|
||||
// provided to the extended Env instance.
|
||||
@@ -304,8 +312,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
for k, v := range e.functions {
|
||||
funcsCopy[k] = v
|
||||
}
|
||||
libsCopy := make(map[string]bool, len(e.libraries))
|
||||
for k, v := range e.libraries {
|
||||
libsCopy[k] = v
|
||||
}
|
||||
|
||||
// TODO: functions copy needs to happen here.
|
||||
ext := &Env{
|
||||
Container: e.Container,
|
||||
declarations: decsCopy,
|
||||
@@ -315,8 +326,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
|
||||
adapter: adapter,
|
||||
features: featuresCopy,
|
||||
appliedFeatures: appliedFeaturesCopy,
|
||||
libraries: libsCopy,
|
||||
provider: provider,
|
||||
chkOpts: chkOptsCopy,
|
||||
prsrOpts: prsrOptsCopy,
|
||||
}
|
||||
return ext.configure(opts)
|
||||
}
|
||||
@@ -328,6 +341,12 @@ func (e *Env) HasFeature(flag int) bool {
|
||||
return has && enabled
|
||||
}
|
||||
|
||||
// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
|
||||
func (e *Env) HasLibrary(libName string) bool {
|
||||
configured, exists := e.libraries[libName]
|
||||
return exists && configured
|
||||
}
|
||||
|
||||
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
|
||||
//
|
||||
// This form of Parse creates a Source value for the input `txt` and forwards to the
|
||||
@@ -422,8 +441,8 @@ func (e *Env) UnknownVars() interpreter.PartialActivation {
|
||||
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
|
||||
// Ast format and then Program again.
|
||||
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
|
||||
pruned := interpreter.PruneAst(a.Expr(), details.State())
|
||||
expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
|
||||
pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
|
||||
expr, err := AstToString(ParsedExprToAst(pruned))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -443,12 +462,12 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
|
||||
|
||||
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
|
||||
// extension functions provided by estimator.
|
||||
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator) (checker.CostEstimate, error) {
|
||||
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
|
||||
checked, err := AstToCheckedExpr(ast)
|
||||
if err != nil {
|
||||
return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err)
|
||||
}
|
||||
return checker.Cost(checked, estimator), nil
|
||||
return checker.Cost(checked, estimator, opts...)
|
||||
}
|
||||
|
||||
// configure applies a series of EnvOptions to the current environment.
|
||||
@@ -464,17 +483,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
}
|
||||
|
||||
// If the default UTC timezone fix has been enabled, make sure the library is configured
|
||||
if e.HasFeature(featureDefaultUTCTimeZone) {
|
||||
if _, found := e.appliedFeatures[featureDefaultUTCTimeZone]; !found {
|
||||
e, err = Lib(timeUTCLibrary{})(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// record that the feature has been applied since it will generate declarations
|
||||
// and functions which will be propagated on Extend() calls and which should only
|
||||
// be registered once.
|
||||
e.appliedFeatures[featureDefaultUTCTimeZone] = true
|
||||
}
|
||||
e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize all of the functions configured within the environment.
|
||||
@@ -486,7 +497,10 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
}
|
||||
|
||||
// Configure the parser.
|
||||
prsrOpts := []parser.Option{parser.Macros(e.macros...)}
|
||||
prsrOpts := []parser.Option{}
|
||||
prsrOpts = append(prsrOpts, e.prsrOpts...)
|
||||
prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
|
||||
|
||||
if e.HasFeature(featureEnableMacroCallTracking) {
|
||||
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
|
||||
}
|
||||
@@ -497,7 +511,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
|
||||
// Ensure that the checker init happens eagerly rather than lazily.
|
||||
if e.HasFeature(featureEagerlyValidateDeclarations) {
|
||||
err := e.initChecker()
|
||||
_, err := e.initChecker()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -506,7 +520,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func (e *Env) initChecker() error {
|
||||
func (e *Env) initChecker() (*checker.Env, error) {
|
||||
e.chkOnce.Do(func() {
|
||||
chkOpts := []checker.Option{}
|
||||
chkOpts = append(chkOpts, e.chkOpts...)
|
||||
@@ -518,32 +532,68 @@ func (e *Env) initChecker() error {
|
||||
|
||||
ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
|
||||
if err != nil {
|
||||
e.chkErr = err
|
||||
e.setCheckerOrError(nil, err)
|
||||
return
|
||||
}
|
||||
// Add the statically configured declarations.
|
||||
err = ce.Add(e.declarations...)
|
||||
if err != nil {
|
||||
e.chkErr = err
|
||||
e.setCheckerOrError(nil, err)
|
||||
return
|
||||
}
|
||||
// Add the function declarations which are derived from the FunctionDecl instances.
|
||||
for _, fn := range e.functions {
|
||||
fnDecl, err := functionDeclToExprDecl(fn)
|
||||
if err != nil {
|
||||
e.chkErr = err
|
||||
e.setCheckerOrError(nil, err)
|
||||
return
|
||||
}
|
||||
err = ce.Add(fnDecl)
|
||||
if err != nil {
|
||||
e.chkErr = err
|
||||
e.setCheckerOrError(nil, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
// Add function declarations here separately.
|
||||
e.chk = ce
|
||||
e.setCheckerOrError(ce, nil)
|
||||
})
|
||||
return e.chkErr
|
||||
return e.getCheckerOrError()
|
||||
}
|
||||
|
||||
// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
|
||||
func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
|
||||
e.chkMutex.Lock()
|
||||
e.chk = chk
|
||||
e.chkErr = chkErr
|
||||
e.chkMutex.Unlock()
|
||||
}
|
||||
|
||||
// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
|
||||
func (e *Env) getCheckerOrError() (*checker.Env, error) {
|
||||
e.chkMutex.Lock()
|
||||
defer e.chkMutex.Unlock()
|
||||
return e.chk, e.chkErr
|
||||
}
|
||||
|
||||
// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
|
||||
// the feature if it has not already been enabled.
|
||||
func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
|
||||
if !e.HasFeature(feature) {
|
||||
return e, nil
|
||||
}
|
||||
_, applied := e.appliedFeatures[feature]
|
||||
if applied {
|
||||
return e, nil
|
||||
}
|
||||
e, err := option(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// record that the feature has been applied since it will generate declarations
|
||||
// and functions which will be propagated on Extend() calls and which should only
|
||||
// be registered once.
|
||||
e.appliedFeatures[feature] = true
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Issues defines methods for inspecting the error details of parse and check calls.
|
||||
|
||||
4 vendor/github.com/google/cel-go/cel/io.go (generated, vendored)
@@ -19,14 +19,14 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/google/cel-go/common"
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/common/types/traits"
|
||||
"github.com/google/cel-go/parser"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
258 vendor/github.com/google/cel-go/cel/library.go (generated, vendored)
@@ -20,10 +20,27 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/cel-go/checker"
|
||||
"github.com/google/cel-go/common"
|
||||
"github.com/google/cel-go/common/operators"
|
||||
"github.com/google/cel-go/common/overloads"
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/common/types/traits"
|
||||
"github.com/google/cel-go/interpreter"
|
||||
"github.com/google/cel-go/interpreter/functions"
|
||||
"github.com/google/cel-go/parser"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
optMapMacro = "optMap"
|
||||
hasValueFunc = "hasValue"
|
||||
optionalNoneFunc = "optional.none"
|
||||
optionalOfFunc = "optional.of"
|
||||
optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
|
||||
valueFunc = "value"
|
||||
unusedIterVar = "#unused"
|
||||
)
|
||||
|
||||
// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
|
||||
@@ -42,10 +59,27 @@ type Library interface {
|
||||
ProgramOptions() []ProgramOption
|
||||
}
|
||||
|
||||
// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
|
||||
// configured once within the environment.
|
||||
type SingletonLibrary interface {
|
||||
Library
|
||||
|
||||
// LibraryName provides a namespaced name which is used to check whether the library has already
|
||||
// been configured in the environment.
|
||||
LibraryName() string
|
||||
}
|
||||
|
||||
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
|
||||
// and to be linked to each other.
|
||||
func Lib(l Library) EnvOption {
|
||||
singleton, isSingleton := l.(SingletonLibrary)
|
||||
return func(e *Env) (*Env, error) {
|
||||
if isSingleton {
|
||||
if e.HasLibrary(singleton.LibraryName()) {
|
||||
return e, nil
|
||||
}
|
||||
e.libraries[singleton.LibraryName()] = true
|
||||
}
|
||||
var err error
|
||||
for _, opt := range l.CompileOptions() {
|
||||
e, err = opt(e)
|
||||
@@ -67,6 +101,11 @@ func StdLib() EnvOption {
|
||||
// features documented in the specification.
|
||||
type stdLibrary struct{}
|
||||
|
||||
// LibraryName implements the SingletonLibrary interface method.
|
||||
func (stdLibrary) LibraryName() string {
|
||||
return "cel.lib.std"
|
||||
}
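Any library can opt into once-only registration by implementing LibraryName; Lib records the name and skips later copies. The sketch below uses a hypothetical library name and variable to illustrate the pattern:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    // greetLib is a hypothetical SingletonLibrary: its options are applied once,
    // keyed by the LibraryName it reports.
    type greetLib struct{}

    func (greetLib) LibraryName() string { return "example.lib.greet" }

    func (greetLib) CompileOptions() []cel.EnvOption {
        return []cel.EnvOption{cel.Variable("greeting", cel.StringType)}
    }

    func (greetLib) ProgramOptions() []cel.ProgramOption { return nil }

    func main() {
        // Passing the library twice is harmless; the second Lib() is a no-op.
        env, err := cel.NewEnv(cel.Lib(greetLib{}), cel.Lib(greetLib{}))
        if err != nil {
            panic(err)
        }
        fmt.Println(env.HasLibrary("example.lib.greet")) // true
    }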
|
||||
|
||||
// EnvOptions returns options for the standard CEL function declarations and macros.
|
||||
func (stdLibrary) CompileOptions() []EnvOption {
|
||||
return []EnvOption{
|
||||
@@ -82,6 +121,225 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
|
||||
}
|
||||
}
|
||||
|
||||
type optionalLibrary struct{}
|
||||
|
||||
// LibraryName implements the SingletonLibrary interface method.
|
||||
func (optionalLibrary) LibraryName() string {
|
||||
return "cel.lib.optional"
|
||||
}
|
||||
|
||||
// CompileOptions implements the Library interface method.
|
||||
func (optionalLibrary) CompileOptions() []EnvOption {
|
||||
paramTypeK := TypeParamType("K")
|
||||
paramTypeV := TypeParamType("V")
|
||||
optionalTypeV := OptionalType(paramTypeV)
|
||||
listTypeV := ListType(paramTypeV)
|
||||
mapTypeKV := MapType(paramTypeK, paramTypeV)
|
||||
|
||||
return []EnvOption{
|
||||
// Enable the optional syntax in the parser.
|
||||
enableOptionalSyntax(),
|
||||
|
||||
// Introduce the optional type.
|
||||
Types(types.OptionalType),
|
||||
|
||||
// Configure the optMap macro.
|
||||
Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
|
||||
|
||||
// Global and member functions for working with optional values.
|
||||
Function(optionalOfFunc,
|
||||
Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
return types.OptionalOf(value)
|
||||
}))),
|
||||
Function(optionalOfNonZeroValueFunc,
|
||||
Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
v, isZeroer := value.(traits.Zeroer)
|
||||
if !isZeroer || !v.IsZeroValue() {
|
||||
return types.OptionalOf(value)
|
||||
}
|
||||
return types.OptionalNone
|
||||
}))),
|
||||
Function(optionalNoneFunc,
|
||||
Overload("optional_none", []*Type{}, optionalTypeV,
|
||||
FunctionBinding(func(values ...ref.Val) ref.Val {
|
||||
return types.OptionalNone
|
||||
}))),
|
||||
Function(valueFunc,
|
||||
MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
opt := value.(*types.Optional)
|
||||
return opt.GetValue()
|
||||
}))),
|
||||
Function(hasValueFunc,
|
||||
MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
|
||||
UnaryBinding(func(value ref.Val) ref.Val {
|
||||
opt := value.(*types.Optional)
|
||||
return types.Bool(opt.HasValue())
|
||||
}))),
|
||||
|
||||
// Implementation of 'or' and 'orValue' are special-cased to support short-circuiting in the
|
||||
// evaluation chain.
|
||||
Function("or",
|
||||
MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
|
||||
Function("orValue",
|
||||
MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
|
||||
|
||||
// OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
|
||||
// output type.
|
||||
Function(operators.OptSelect,
|
||||
Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
|
||||
|
||||
// OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
|
||||
// these signatures to determine type-agreement without any special handling.
|
||||
Function(operators.OptIndex,
|
||||
Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
|
||||
Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
|
||||
Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
|
||||
Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
|
||||
|
||||
// Index overloads to accommodate using an optional value as the operand.
|
||||
Function(operators.Index,
|
||||
Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
|
||||
Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
|
||||
}
|
||||
}
|
||||
|
||||
func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
|
||||
varIdent := args[0]
|
||||
varName := ""
|
||||
switch varIdent.GetExprKind().(type) {
|
||||
case *exprpb.Expr_IdentExpr:
|
||||
varName = varIdent.GetIdentExpr().GetName()
|
||||
default:
|
||||
return nil, &common.Error{
|
||||
Message: "optMap() variable name must be a simple identifier",
|
||||
Location: meh.OffsetLocation(varIdent.GetId()),
|
||||
}
|
||||
}
|
||||
mapExpr := args[1]
|
||||
return meh.GlobalCall(
|
||||
operators.Conditional,
|
||||
meh.ReceiverCall(hasValueFunc, target),
|
||||
meh.GlobalCall(optionalOfFunc,
|
||||
meh.Fold(
|
||||
unusedIterVar,
|
||||
meh.NewList(),
|
||||
varName,
|
||||
meh.ReceiverCall(valueFunc, target),
|
||||
meh.LiteralBool(false),
|
||||
meh.Ident(varName),
|
||||
mapExpr,
|
||||
),
|
||||
),
|
||||
meh.GlobalCall(optionalNoneFunc),
|
||||
), nil
|
||||
}
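optMap expands into a conditional over hasValue()/value() plus a single-pass fold, so the mapping expression only runs when the target optional holds a value. A hedged usage sketch:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        env, err := cel.NewEnv(cel.OptionalTypes())
        if err != nil {
            panic(err)
        }
        // x is bound to the wrapped value only when the optional is non-empty;
        // an empty optional short-circuits straight to optional.none().
        ast, iss := env.Compile(`optional.of(20).optMap(x, x * 2 + 2).orValue(0)`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(cel.NoVars())
        fmt.Println(out, err) // 42 <nil>
    }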
|
||||
|
||||
// ProgramOptions implements the Library interface method.
|
||||
func (optionalLibrary) ProgramOptions() []ProgramOption {
|
||||
return []ProgramOption{
|
||||
CustomDecorator(decorateOptionalOr),
|
||||
}
|
||||
}
|
||||
|
||||
func enableOptionalSyntax() EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
|
||||
func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
|
||||
call, ok := i.(interpreter.InterpretableCall)
|
||||
if !ok {
|
||||
return i, nil
|
||||
}
|
||||
args := call.Args()
|
||||
if len(args) != 2 {
|
||||
return i, nil
|
||||
}
|
||||
switch call.Function() {
|
||||
case "or":
|
||||
if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
|
||||
return i, nil
|
||||
}
|
||||
return &evalOptionalOr{
|
||||
id: call.ID(),
|
||||
lhs: args[0],
|
||||
rhs: args[1],
|
||||
}, nil
|
||||
case "orValue":
|
||||
if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
|
||||
return i, nil
|
||||
}
|
||||
return &evalOptionalOrValue{
|
||||
id: call.ID(),
|
||||
lhs: args[0],
|
||||
rhs: args[1],
|
||||
}, nil
|
||||
default:
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
// evalOptionalOr selects between two optional values, either the first if it has a value, or
|
||||
// the second optional expression is evaluated and returned.
|
||||
type evalOptionalOr struct {
|
||||
id int64
|
||||
lhs interpreter.Interpretable
|
||||
rhs interpreter.Interpretable
|
||||
}
|
||||
|
||||
// ID implements the Interpretable interface method.
|
||||
func (opt *evalOptionalOr) ID() int64 {
|
||||
return opt.id
|
||||
}
|
||||
|
||||
// Eval evaluates the left-hand side optional to determine whether it contains a value, else
|
||||
// proceeds with the right-hand side evaluation.
|
||||
func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
|
||||
// short-circuit lhs.
|
||||
optLHS := opt.lhs.Eval(ctx)
|
||||
optVal, ok := optLHS.(*types.Optional)
|
||||
if !ok {
|
||||
return optLHS
|
||||
}
|
||||
if optVal.HasValue() {
|
||||
return optVal
|
||||
}
|
||||
return opt.rhs.Eval(ctx)
|
||||
}
|
||||
|
||||
// evalOptionalOrValue selects between an optional or a concrete value. If the optional has a value,
|
||||
// its value is returned, otherwise the alternative value expression is evaluated and returned.
|
||||
type evalOptionalOrValue struct {
|
||||
id int64
|
||||
lhs interpreter.Interpretable
|
||||
rhs interpreter.Interpretable
|
||||
}
|
||||
|
||||
// ID implements the Interpretable interface method.
|
||||
func (opt *evalOptionalOrValue) ID() int64 {
|
||||
return opt.id
|
||||
}
|
||||
|
||||
// Eval evaluates the left-hand side optional to determine whether it contains a value, else
|
||||
// proceeds with the right-hand side evaluation.
|
||||
func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
|
||||
// short-circuit lhs.
|
||||
optLHS := opt.lhs.Eval(ctx)
|
||||
optVal, ok := optLHS.(*types.Optional)
|
||||
if !ok {
|
||||
return optLHS
|
||||
}
|
||||
if optVal.HasValue() {
|
||||
return optVal.GetValue()
|
||||
}
|
||||
return opt.rhs.Eval(ctx)
|
||||
}
|
||||
|
||||
type timeUTCLibrary struct{}
|
||||
|
||||
func (timeUTCLibrary) CompileOptions() []EnvOption {
|
||||
|
||||
14 vendor/github.com/google/cel-go/cel/macro.go (generated, vendored)
@@ -17,6 +17,7 @@ package cel
|
||||
import (
|
||||
"github.com/google/cel-go/common"
|
||||
"github.com/google/cel-go/parser"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -26,8 +27,11 @@ import (
|
||||
// a Macro should be created per arg-count or as a var arg macro.
|
||||
type Macro = parser.Macro
|
||||
|
||||
// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
|
||||
// if the input arguments are not suitable for the expansion requirements for the macro in question.
|
||||
// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
|
||||
//
|
||||
// If the MacroExpander determines within the implementation that an expansion is not needed it may return
|
||||
// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
|
||||
// are not well-formed, the result of the expansion will be an error.
|
||||
//
|
||||
// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
|
||||
// and produces as output an Expr ast node.
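As described above, an expander may return a nil Expr to signal that no expansion applies, or build a replacement AST through the MacroExprHelper. A hedged sketch of a custom global macro (the macro name and rewrite are made up for illustration):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/common"

        exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
    )

    // doubleExpander rewrites double(x) into x * 2. Returning (nil, nil) instead
    // would indicate a non-match per the contract above.
    func doubleExpander(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
        return meh.GlobalCall("_*_", args[0], meh.LiteralInt(2)), nil
    }

    func main() {
        env, err := cel.NewEnv(cel.Macros(cel.NewGlobalMacro("double", 1, doubleExpander)))
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`double(21)`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(cel.NoVars())
        fmt.Println(out, err) // 42 <nil>
    }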
|
||||
@@ -81,8 +85,10 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
|
||||
// input to produce an output list.
|
||||
//
|
||||
// There are two call patterns supported by map:
|
||||
// <iterRange>.map(<iterVar>, <transform>)
|
||||
// <iterRange>.map(<iterVar>, <predicate>, <transform>)
|
||||
//
|
||||
// <iterRange>.map(<iterVar>, <transform>)
|
||||
// <iterRange>.map(<iterVar>, <predicate>, <transform>)
|
||||
//
|
||||
// In the second form only iterVar values which return true when provided to the predicate expression
|
||||
// are transformed.
|
||||
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
|
||||
|
||||
64 vendor/github.com/google/cel-go/cel/options.go (generated, vendored)
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/interpreter"
|
||||
"github.com/google/cel-go/interpreter/functions"
|
||||
"github.com/google/cel-go/parser"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
descpb "google.golang.org/protobuf/types/descriptorpb"
|
||||
@@ -61,6 +62,10 @@ const (
|
||||
// on a CEL timestamp operation. This fixes the scenario where the input time
|
||||
// is not already in UTC.
|
||||
featureDefaultUTCTimeZone
|
||||
|
||||
// Enable the use of optional types in the syntax, type-system, type-checking,
|
||||
// and runtime.
|
||||
featureOptionalTypes
|
||||
)
|
||||
|
||||
// EnvOption is a functional interface for configuring the environment.
|
||||
@@ -163,19 +168,19 @@ func Container(name string) EnvOption {
|
||||
// Abbreviations can be useful when working with variables, functions, and especially types from
|
||||
// multiple namespaces:
|
||||
//
|
||||
// // CEL object construction
|
||||
// qual.pkg.version.ObjTypeName{
|
||||
// field: alt.container.ver.FieldTypeName{value: ...}
|
||||
// }
|
||||
// // CEL object construction
|
||||
// qual.pkg.version.ObjTypeName{
|
||||
// field: alt.container.ver.FieldTypeName{value: ...}
|
||||
// }
|
||||
//
|
||||
// Only one the qualified names above may be used as the CEL container, so at least one of these
|
||||
// references must be a long qualified name within an otherwise short CEL program. Using the
|
||||
// following abbreviations, the program becomes much simpler:
|
||||
//
|
||||
// // CEL Go option
|
||||
// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
|
||||
// // Simplified Object construction
|
||||
// ObjTypeName{field: FieldTypeName{value: ...}}
|
||||
// // CEL Go option
|
||||
// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
|
||||
// // Simplified Object construction
|
||||
// ObjTypeName{field: FieldTypeName{value: ...}}
|
||||
//
|
||||
// There are a few rules for the qualified names and the simple abbreviations generated from them:
|
||||
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
|
||||
@@ -188,9 +193,12 @@ func Container(name string) EnvOption {
|
||||
// - Expanded abbreviations do not participate in namespace resolution.
|
||||
// - Abbreviation expansion is done instead of the container search for a matching identifier.
|
||||
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
|
||||
// to the least qualified name.
|
||||
//
|
||||
// to the least qualified name.
|
||||
//
|
||||
// - Container references within the CEL program may be relative, and are resolved to fully
|
||||
// qualified names at either type-check time or program plan time, whichever comes first.
|
||||
//
|
||||
// qualified names at either type-check time or program plan time, whichever comes first.
|
||||
//
|
||||
// If there is ever a case where an identifier could be in both the container and as an
|
||||
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
|
||||
@@ -216,7 +224,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
|
||||
// environment by default.
|
||||
//
|
||||
// Note: This option must be specified after the CustomTypeProvider option when used together.
|
||||
func Types(addTypes ...interface{}) EnvOption {
|
||||
func Types(addTypes ...any) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
reg, isReg := e.provider.(ref.TypeRegistry)
|
||||
if !isReg {
|
||||
@@ -253,7 +261,7 @@ func Types(addTypes ...interface{}) EnvOption {
|
||||
//
|
||||
// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
|
||||
// extension or by re-using the same EnvOption with another NewEnv() call.
|
||||
func TypeDescs(descs ...interface{}) EnvOption {
|
||||
func TypeDescs(descs ...any) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
reg, isReg := e.provider.(ref.TypeRegistry)
|
||||
if !isReg {
|
||||
@@ -350,8 +358,8 @@ func Functions(funcs ...*functions.Overload) ProgramOption {
|
||||
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
|
||||
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
|
||||
func Globals(vars interface{}) ProgramOption {
|
||||
// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
|
||||
func Globals(vars any) ProgramOption {
|
||||
return func(p *prog) (*prog, error) {
|
||||
defaultVars, err := interpreter.NewActivation(vars)
|
||||
if err != nil {
|
||||
@@ -404,6 +412,9 @@ const (
|
||||
// OptTrackCost enables the runtime cost calculation while validation and return cost within evalDetails
|
||||
// cost calculation is available via func ActualCost()
|
||||
OptTrackCost EvalOption = 1 << iota
|
||||
|
||||
// OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality.
|
||||
OptCheckStringFormat EvalOption = 1 << iota
|
||||
)
|
||||
|
||||
// EvalOptions sets one or more evaluation options which may affect the evaluation or Result.
|
||||
@@ -534,6 +545,13 @@ func DefaultUTCTimeZone(enabled bool) EnvOption {
|
||||
return features(featureDefaultUTCTimeZone, enabled)
|
||||
}
|
||||
|
||||
// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes
|
||||
// it possible to express whether variables have been provided, whether a result has been computed,
|
||||
// and in the future whether an object field path, map key value, or list index has a value.
|
||||
func OptionalTypes() EnvOption {
|
||||
return Lib(optionalLibrary{})
|
||||
}
|
||||
|
||||
// features sets the given feature flags. See list of Feature constants above.
|
||||
func features(flag int, enabled bool) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
@@ -541,3 +559,21 @@ func features(flag int, enabled bool) EnvOption {
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ParserRecursionLimit adjusts the AST depth the parser will tolerate.
|
||||
// Defaults defined in the parser package.
|
||||
func ParserRecursionLimit(limit int) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
e.prsrOpts = append(e.prsrOpts, parser.MaxRecursionDepth(limit))
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ParserExpressionSizeLimit adjusts the number of code points the expression parser is allowed to parse.
|
||||
// Defaults defined in the parser package.
|
||||
func ParserExpressionSizeLimit(limit int) EnvOption {
|
||||
return func(e *Env) (*Env, error) {
|
||||
e.prsrOpts = append(e.prsrOpts, parser.ExpressionSizeCodePointLimit(limit))
|
||||
return e, nil
|
||||
}
|
||||
}
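Both options simply append the corresponding parser.Option, so they compose freely with other EnvOptions. A sketch of bounding untrusted expressions (the limits are arbitrary illustrative values):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        env, err := cel.NewEnv(
            cel.ParserRecursionLimit(32),        // cap AST nesting depth
            cel.ParserExpressionSizeLimit(4096), // cap expression size in code points
        )
        if err != nil {
            panic(err)
        }
        _, iss := env.Compile(`1 + 2 + 3`)
        fmt.Println(iss == nil || iss.Err() == nil) // true: well within both limits
    }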
|
||||
|
||||
114 vendor/github.com/google/cel-go/cel/program.go (generated, vendored)
@@ -17,21 +17,20 @@ package cel
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
|
||||
"github.com/google/cel-go/common/types"
|
||||
"github.com/google/cel-go/common/types/ref"
|
||||
"github.com/google/cel-go/interpreter"
|
||||
|
||||
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
|
||||
)
|
||||
|
||||
// Program is an evaluable view of an Ast.
|
||||
type Program interface {
|
||||
// Eval returns the result of an evaluation of the Ast and environment against the input vars.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` or a `map[string]interface{}`.
|
||||
// The vars value may either be an `interpreter.Activation` or a `map[string]any`.
|
||||
//
|
||||
// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
|
||||
// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
|
||||
@@ -43,16 +42,16 @@ type Program interface {
|
||||
// An unsuccessful evaluation is typically the result of a series of incompatible `EnvOption`
|
||||
// or `ProgramOption` values used in the creation of the evaluation environment or executable
|
||||
// program.
|
||||
Eval(interface{}) (ref.Val, *EvalDetails, error)
|
||||
Eval(any) (ref.Val, *EvalDetails, error)
|
||||
|
||||
// ContextEval evaluates the program with a set of input variables and a context object in order
|
||||
// to support cancellation and timeouts. This method must be used in conjunction with the
|
||||
// InterruptCheckFrequency() option for cancellation interrupts to impact evaluation.
|
||||
//
|
||||
// The vars value may either be an `interpreter.Activation` or `map[string]interface{}`.
|
||||
// The vars value may either be an `interpreter.Activation` or `map[string]any`.
|
||||
//
|
||||
// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
|
||||
ContextEval(context.Context, interface{}) (ref.Val, *EvalDetails, error)
|
||||
ContextEval(context.Context, any) (ref.Val, *EvalDetails, error)
|
||||
}
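Eval and ContextEval now take any, but the accepted inputs are unchanged: an interpreter.Activation or a map[string]any. A minimal sketch:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/google/cel-go/cel"
    )

    func main() {
        env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`"hello " + name`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }

        // Plain map input is wrapped in a pooled Activation internally.
        out, _, err := prg.Eval(map[string]any{"name": "world"})
        fmt.Println(out, err) // hello world <nil>

        // ContextEval honours cancellation once InterruptCheckFrequency is configured.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        out, _, err = prg.ContextEval(ctx, map[string]any{"name": "world"})
        fmt.Println(out, err)
    }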
|
||||
|
||||
// NoVars returns an empty Activation.
|
||||
@@ -65,7 +64,7 @@ func NoVars() interpreter.Activation {
|
||||
//
|
||||
// The `vars` value may either be an interpreter.Activation or any valid input to the
|
||||
// interpreter.NewActivation call.
|
||||
func PartialVars(vars interface{},
|
||||
func PartialVars(vars any,
|
||||
unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
|
||||
return interpreter.NewPartialActivation(vars, unknowns...)
|
||||
}
|
||||
@@ -207,6 +206,37 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
|
||||
if len(p.regexOptimizations) > 0 {
|
||||
decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
|
||||
}
|
||||
// Enable compile-time checking of syntax/cardinality for string.format calls.
|
||||
if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
|
||||
var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error)
|
||||
if ast.IsChecked() {
|
||||
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
|
||||
t, err := ExprTypeToType(ast.typeMap[id])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if t.kind == DynKind {
|
||||
return true, nil
|
||||
}
|
||||
for _, vt := range validTypes {
|
||||
k, err := typeValueToKind(vt)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if k == t.kind {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
} else {
|
||||
// if the AST isn't type-checked, short-circuit validation
|
||||
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
|
||||
}
|
||||
|
||||
// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
|
||||
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
|
||||
@@ -268,7 +298,7 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor
|
||||
}
|
||||
|
||||
// Eval implements the Program interface method.
|
||||
func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
|
||||
func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
|
||||
// Configure error recovery for unexpected panics during evaluation. Note, the use of named
|
||||
// return values makes it possible to modify the error response during the recovery
|
||||
// function.
|
||||
@@ -287,11 +317,11 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
|
||||
switch v := input.(type) {
|
||||
case interpreter.Activation:
|
||||
vars = v
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
vars = activationPool.Setup(v)
|
||||
defer activationPool.Put(vars)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
|
||||
return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
|
||||
}
|
||||
if p.defaultVars != nil {
|
||||
vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
|
||||
@@ -307,7 +337,7 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
|
||||
}
|
||||
|
||||
// ContextEval implements the Program interface.
|
||||
func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
|
||||
func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
|
||||
if ctx == nil {
|
||||
return nil, nil, fmt.Errorf("context can not be nil")
|
||||
}
|
||||
@@ -318,22 +348,17 @@ func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *Ev
|
||||
case interpreter.Activation:
|
||||
vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
|
||||
defer ctxActivationPool.Put(vars)
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
rawVars := activationPool.Setup(v)
|
||||
defer activationPool.Put(rawVars)
|
||||
vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
|
||||
defer ctxActivationPool.Put(vars)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
|
||||
return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
|
||||
}
|
||||
return p.Eval(vars)
|
||||
}
|
||||
|
||||
// Cost implements the Coster interface method.
|
||||
func (p *prog) Cost() (min, max int64) {
|
||||
return estimateCost(p.interpretable)
|
||||
}
|
||||
|
||||
// progFactory is a helper alias for marking a program creation factory function.
|
||||
type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
|
||||
|
||||
@@ -354,7 +379,7 @@ func newProgGen(factory progFactory) (Program, error) {
|
||||
}
|
||||
|
||||
// Eval implements the Program interface method.
|
||||
func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
|
||||
func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
|
||||
// The factory based Eval() differs from the standard evaluation model in that it generates a
|
||||
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
|
||||
// results.
|
||||
@@ -379,7 +404,7 @@ func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
|
||||
}
|
||||
|
||||
// ContextEval implements the Program interface method.
|
||||
func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
|
||||
func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
|
||||
if ctx == nil {
|
||||
return nil, nil, fmt.Errorf("context can not be nil")
|
||||
}
|
||||
@@ -406,29 +431,6 @@ func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val
|
||||
return v, det, nil
|
||||
}
|
||||
|
||||
// Cost implements the Coster interface method.
|
||||
func (gen *progGen) Cost() (min, max int64) {
|
||||
// Use an empty state value since no evaluation is performed.
|
||||
p, err := gen.factory(emptyEvalState, nil)
|
||||
if err != nil {
|
||||
return 0, math.MaxInt64
|
||||
}
|
||||
return estimateCost(p)
|
||||
}
|
||||
|
||||
// EstimateCost returns the heuristic cost interval for the program.
|
||||
func EstimateCost(p Program) (min, max int64) {
|
||||
return estimateCost(p)
|
||||
}
|
||||
|
||||
func estimateCost(i interface{}) (min, max int64) {
|
||||
c, ok := i.(interpreter.Coster)
|
||||
if !ok {
|
||||
return 0, math.MaxInt64
|
||||
}
|
||||
return c.Cost()
|
||||
}
|
||||
|
||||
type ctxEvalActivation struct {
|
||||
parent interpreter.Activation
|
||||
interrupt <-chan struct{}
|
||||
@@ -438,7 +440,7 @@ type ctxEvalActivation struct {
|
||||
|
||||
// ResolveName implements the Activation interface method, but adds a special #interrupted variable
|
||||
// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
|
||||
func (a *ctxEvalActivation) ResolveName(name string) (interface{}, bool) {
|
||||
func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
|
||||
if name == "#interrupted" {
|
||||
a.interruptCheckCount++
|
||||
if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
|
||||
@@ -461,7 +463,7 @@ func (a *ctxEvalActivation) Parent() interpreter.Activation {
|
||||
func newCtxEvalActivationPool() *ctxEvalActivationPool {
|
||||
return &ctxEvalActivationPool{
|
||||
Pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
return &ctxEvalActivation{}
|
||||
},
|
||||
},
|
||||
@@ -483,21 +485,21 @@ func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan s
|
||||
}
|
||||
|
||||
type evalActivation struct {
|
||||
vars map[string]interface{}
|
||||
lazyVars map[string]interface{}
|
||||
vars map[string]any
|
||||
lazyVars map[string]any
|
||||
}
|
||||
|
||||
// ResolveName looks up the value of the input variable name, if found.
|
||||
//
|
||||
// Lazy bindings may be supplied within the map-based input in either of the following forms:
|
||||
// - func() interface{}
|
||||
// - func() any
|
||||
// - func() ref.Val
|
||||
//
|
||||
// The lazy binding will only be invoked once per evaluation.
|
||||
//
|
||||
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
|
||||
// the ref.TypeAdapter configured in the environment.
|
||||
func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
|
||||
func (a *evalActivation) ResolveName(name string) (any, bool) {
|
||||
v, found := a.vars[name]
|
||||
if !found {
|
||||
return nil, false
|
||||
@@ -510,7 +512,7 @@ func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
|
||||
lazy := obj()
|
||||
a.lazyVars[name] = lazy
|
||||
return lazy, true
|
||||
case func() interface{}:
|
||||
case func() any:
|
||||
if resolved, found := a.lazyVars[name]; found {
|
||||
return resolved, true
|
||||
}
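Per the comment above, map-based inputs may carry lazy bindings as func() ref.Val or func() any; the closure runs at most once per evaluation and only if the variable is actually referenced. A compact sketch:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        env, err := cel.NewEnv(cel.Variable("expensive", cel.IntType))
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`expensive + 1`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }

        calls := 0
        out, _, err := prg.Eval(map[string]any{
            // Lazy binding: invoked once, and only because the expression
            // references `expensive`.
            "expensive": func() any {
                calls++
                return 41
            },
        })
        fmt.Println(out, calls, err) // 42 1 <nil>
    }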
|
||||
@@ -530,8 +532,8 @@ func (a *evalActivation) Parent() interpreter.Activation {
|
||||
func newEvalActivationPool() *evalActivationPool {
|
||||
return &evalActivationPool{
|
||||
Pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &evalActivation{lazyVars: make(map[string]interface{})}
|
||||
New: func() any {
|
||||
return &evalActivation{lazyVars: make(map[string]any)}
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -542,13 +544,13 @@ type evalActivationPool struct {
|
||||
}
|
||||
|
||||
// Setup initializes a pooled Activation object with the map input.
|
||||
func (p *evalActivationPool) Setup(vars map[string]interface{}) *evalActivation {
|
||||
func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
|
||||
a := p.Pool.Get().(*evalActivation)
|
||||
a.vars = vars
|
||||
return a
|
||||
}
|
||||
|
||||
func (p *evalActivationPool) Put(value interface{}) {
|
||||
func (p *evalActivationPool) Put(value any) {
|
||||
a := value.(*evalActivation)
|
||||
for k := range a.lazyVars {
|
||||
delete(a.lazyVars, k)
|
||||
@@ -559,7 +561,7 @@ func (p *evalActivationPool) Put(value interface{}) {
|
||||
var (
|
||||
emptyEvalState = interpreter.NewEvalState()
|
||||
|
||||
// activationPool is an internally managed pool of Activation values that wrap map[string]interface{} inputs
|
||||
// activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
|
||||
activationPool = newEvalActivationPool()
|
||||
|
||||
// ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
|
||||
|
||||
4 vendor/github.com/google/cel-go/checker/BUILD.bazel (generated, vendored)
@@ -30,7 +30,7 @@ go_library(
|
||||
"//common/types/pb:go_default_library",
|
||||
"//common/types/ref:go_default_library",
|
||||
"//parser:go_default_library",
|
||||
"@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
|
||||
@@ -54,7 +54,7 @@ go_test(
|
||||
"//test:go_default_library",
|
||||
"//test/proto2pb:go_default_library",
|
||||
"//test/proto3pb:go_default_library",
|
||||
"@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
|
||||
"@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
137 vendor/github.com/google/cel-go/checker/checker.go generated vendored

@@ -23,6 +23,7 @@ import (
    "github.com/google/cel-go/checker/decls"
    "github.com/google/cel-go/common"
    "github.com/google/cel-go/common/containers"
    "github.com/google/cel-go/common/operators"
    "github.com/google/cel-go/common/types/ref"

    "google.golang.org/protobuf/proto"
@@ -173,8 +174,8 @@ func (c *checker) checkSelect(e *exprpb.Expr) {

    // Rewrite the node to be a variable reference to the resolved fully-qualified
    // variable name.
    c.setType(e, ident.GetIdent().Type)
    c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
    c.setType(e, ident.GetIdent().GetType())
    c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
    identName := ident.GetName()
    e.ExprKind = &exprpb.Expr_IdentExpr{
        IdentExpr: &exprpb.Expr_Ident{
@@ -185,9 +186,37 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
        }
    }

    resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
    if sel.TestOnly {
        resultType = decls.Bool
    }
    c.setType(e, substitute(c.mappings, resultType, false))
}

func (c *checker) checkOptSelect(e *exprpb.Expr) {
    // Collect metadata related to the opt select call packaged by the parser.
    call := e.GetCallExpr()
    operand := call.GetArgs()[0]
    field := call.GetArgs()[1]
    fieldName, isString := maybeUnwrapString(field)
    if !isString {
        c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field)
        return
    }

    // Perform type-checking using the field selection logic.
    resultType := c.checkSelectField(e, operand, fieldName, true)
    c.setType(e, substitute(c.mappings, resultType, false))
}

func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type {
    // Interpret as field selection, first traversing down the operand.
    c.check(sel.GetOperand())
    targetType := substitute(c.mappings, c.getType(sel.GetOperand()), false)
    c.check(operand)
    operandType := substitute(c.mappings, c.getType(operand), false)

    // If the target type is 'optional', unwrap it for the sake of this check.
    targetType, isOpt := maybeUnwrapOptional(operandType)

    // Assume error type by default as most types do not support field selection.
    resultType := decls.Error
    switch kindOf(targetType) {
@@ -199,7 +228,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
        // Objects yield their field type declaration as the selection result type, but only if
        // the field is defined.
        messageType := targetType
        if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), sel.GetField()); found {
        if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found {
            resultType = fieldType.Type
        }
    case kindTypeParam:
@@ -212,16 +241,17 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
    default:
        // Dynamic / error values are treated as DYN type. Errors are handled this way as well
        // in order to allow forward progress on the check.
        if isDynOrError(targetType) {
            resultType = decls.Dyn
        } else {
        if !isDynOrError(targetType) {
            c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
        }
        resultType = decls.Dyn
    }
    if sel.TestOnly {
        resultType = decls.Bool

    // If the target type was optional coming in, then the result must be optional going out.
    if isOpt || optional {
        return decls.NewOptionalType(resultType)
    }
    c.setType(e, substitute(c.mappings, resultType, false))
    return resultType
}

func (c *checker) checkCall(e *exprpb.Expr) {
@@ -229,15 +259,19 @@ func (c *checker) checkCall(e *exprpb.Expr) {
    // please consider the impact on planner.go and consolidate implementations or mirror code
    // as appropriate.
    call := e.GetCallExpr()
    target := call.GetTarget()
    args := call.GetArgs()
    fnName := call.GetFunction()
    if fnName == operators.OptSelect {
        c.checkOptSelect(e)
        return
    }

    args := call.GetArgs()
    // Traverse arguments.
    for _, arg := range args {
        c.check(arg)
    }

    target := call.GetTarget()
    // Regular static call with simple name.
    if target == nil {
        // Check for the existence of the function.
@@ -359,6 +393,9 @@ func (c *checker) resolveOverload(
    }

    if resultType == nil {
        for i, arg := range argTypes {
            argTypes[i] = substitute(c.mappings, arg, true)
        }
        c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
        resultType = decls.Error
        return nil
@@ -369,16 +406,29 @@ func (c *checker) resolveOverload(

func (c *checker) checkCreateList(e *exprpb.Expr) {
    create := e.GetListExpr()
    var elemType *exprpb.Type
    for _, e := range create.GetElements() {
    var elemsType *exprpb.Type
    optionalIndices := create.GetOptionalIndices()
    optionals := make(map[int32]bool, len(optionalIndices))
    for _, optInd := range optionalIndices {
        optionals[optInd] = true
    }
    for i, e := range create.GetElements() {
        c.check(e)
        elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
        elemType := c.getType(e)
        if optionals[int32(i)] {
            var isOptional bool
            elemType, isOptional = maybeUnwrapOptional(elemType)
            if !isOptional && !isDyn(elemType) {
                c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType)
            }
        }
        elemsType = c.joinTypes(c.location(e), elemsType, elemType)
    }
    if elemType == nil {
    if elemsType == nil {
        // If the list is empty, assign free type var to elem type.
        elemType = c.newTypeVar()
        elemsType = c.newTypeVar()
    }
    c.setType(e, decls.NewListType(elemType))
    c.setType(e, decls.NewListType(elemsType))
}

func (c *checker) checkCreateStruct(e *exprpb.Expr) {
@@ -392,22 +442,31 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) {

func (c *checker) checkCreateMap(e *exprpb.Expr) {
    mapVal := e.GetStructExpr()
    var keyType *exprpb.Type
    var valueType *exprpb.Type
    var mapKeyType *exprpb.Type
    var mapValueType *exprpb.Type
    for _, ent := range mapVal.GetEntries() {
        key := ent.GetMapKey()
        c.check(key)
        keyType = c.joinTypes(c.location(key), keyType, c.getType(key))
        mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key))

        c.check(ent.Value)
        valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
        val := ent.GetValue()
        c.check(val)
        valType := c.getType(val)
        if ent.GetOptionalEntry() {
            var isOptional bool
            valType, isOptional = maybeUnwrapOptional(valType)
            if !isOptional && !isDyn(valType) {
                c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType)
            }
        }
        mapValueType = c.joinTypes(c.location(val), mapValueType, valType)
    }
    if keyType == nil {
    if mapKeyType == nil {
        // If the map is empty, assign free type variables to typeKey and value type.
        keyType = c.newTypeVar()
        valueType = c.newTypeVar()
        mapKeyType = c.newTypeVar()
        mapValueType = c.newTypeVar()
    }
    c.setType(e, decls.NewMapType(keyType, valueType))
    c.setType(e, decls.NewMapType(mapKeyType, mapValueType))
}

func (c *checker) checkCreateMessage(e *exprpb.Expr) {
@@ -449,15 +508,21 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
        c.check(value)

        fieldType := decls.Error
        if t, found := c.lookupFieldType(
            c.locationByID(ent.GetId()),
            messageType.GetMessageType(),
            field); found {
            fieldType = t.Type
        ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field)
        if found {
            fieldType = ft.Type
        }
        if !c.isAssignable(fieldType, c.getType(value)) {
            c.errors.fieldTypeMismatch(
                c.locationByID(ent.Id), field, fieldType, c.getType(value))

        valType := c.getType(value)
        if ent.GetOptionalEntry() {
            var isOptional bool
            valType, isOptional = maybeUnwrapOptional(valType)
            if !isOptional && !isDyn(valType) {
                c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType)
            }
        }
        if !c.isAssignable(fieldType, valType) {
            c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType)
        }
    }
}
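The `checkOptSelect`, optional-index, and optional-entry paths above back CEL's optional syntax (`a.?b`, `[?x]`, `{?'k': v}`). A hedged sketch, not part of this commit, of compiling such an expression, assuming the optional-types feature is enabled in the environment and using an illustrative variable `m`:

package main

import (
    "fmt"

    "github.com/google/cel-go/cel"
)

func main() {
    env, err := cel.NewEnv(
        cel.OptionalTypes(),
        cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
    )
    if err != nil {
        panic(err)
    }
    // m.?key yields optional(string); the [?...] element is only added when present.
    ast, iss := env.Compile(`[?m.?key, "always"].size() >= 1`)
    if iss != nil && iss.Err() != nil {
        panic(iss.Err())
    }
    fmt.Println(ast.OutputType()) // bool
}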
59 vendor/github.com/google/cel-go/checker/cost.go generated vendored

@@ -92,7 +92,10 @@ func (e astNode) ComputedSize() *SizeEstimate {
    case *exprpb.Expr_ConstExpr:
        switch ck := ek.ConstExpr.GetConstantKind().(type) {
        case *exprpb.Constant_StringValue:
            v = uint64(len(ck.StringValue))
            // converting to runes here is an O(n) operation, but
            // this is consistent with how size is computed at runtime,
            // and how the language definition defines string size
            v = uint64(len([]rune(ck.StringValue)))
        case *exprpb.Constant_BytesValue:
            v = uint64(len(ck.BytesValue))
        case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
@@ -258,6 +261,8 @@ type coster struct {
    computedSizes map[int64]SizeEstimate
    checkedExpr   *exprpb.CheckedExpr
    estimator     CostEstimator
    // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
    presenceTestCost CostEstimate
}

// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
@@ -280,16 +285,39 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) {
    return 0, false
}

// Cost estimates the cost of the parsed and type checked CEL expression.
func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator) CostEstimate {
    c := coster{
        checkedExpr:   checker,
        estimator:     estimator,
        exprPath:      map[int64][]string{},
        iterRanges:    map[string][]int64{},
        computedSizes: map[int64]SizeEstimate{},
// CostOption configures flags which affect cost computations.
type CostOption func(*coster) error

// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
// Defaults to presence test has a cost of one.
func PresenceTestHasCost(hasCost bool) CostOption {
    return func(c *coster) error {
        if hasCost {
            c.presenceTestCost = selectAndIdentCost
            return nil
        }
        c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
        return nil
    }
    return c.cost(checker.GetExpr())
}

// Cost estimates the cost of the parsed and type checked CEL expression.
func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
    c := &coster{
        checkedExpr:      checker,
        estimator:        estimator,
        exprPath:         map[int64][]string{},
        iterRanges:       map[string][]int64{},
        computedSizes:    map[int64]SizeEstimate{},
        presenceTestCost: CostEstimate{Min: 1, Max: 1},
    }
    for _, opt := range opts {
        err := opt(c)
        if err != nil {
            return CostEstimate{}, err
        }
    }
    return c.cost(checker.GetExpr()), nil
}

func (c *coster) cost(e *exprpb.Expr) CostEstimate {
@@ -340,6 +368,12 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
    sel := e.GetSelectExpr()
    var sum CostEstimate
    if sel.GetTestOnly() {
        // recurse, but do not add any cost
        // this is equivalent to how evalTestOnly increments the runtime cost counter
        // but does not add any additional cost for the qualifier, except here we do
        // the reverse (ident adds cost)
        sum = sum.Add(c.presenceTestCost)
        sum = sum.Add(c.cost(sel.GetOperand()))
        return sum
    }
    sum = sum.Add(c.cost(sel.GetOperand()))
@@ -503,7 +537,10 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
    }
    switch overloadID {
    // O(n) functions
    case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
    case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
        if overloadID == overloads.ExtFormatString {
            return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
        }
        if len(args) == 1 {
            return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
        }
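The `Cost` signature above is a breaking change for callers: it now accepts variadic `CostOption` values and returns an error. A small sketch of the new call shape; `checked` and `est` are assumed inputs, not part of this diff:

import (
    "fmt"

    "github.com/google/cel-go/checker"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// estimateCost sketches the new variadic-option form of checker.Cost.
func estimateCost(checked *exprpb.CheckedExpr, est checker.CostEstimator) error {
    costEst, err := checker.Cost(checked, est,
        // Opt out of charging for has() presence tests.
        checker.PresenceTestHasCost(false),
    )
    if err != nil {
        return err
    }
    fmt.Printf("static cost range: [%d, %d]\n", costEst.Min, costEst.Max)
    return nil
}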
2 vendor/github.com/google/cel-go/checker/decls/BUILD.bazel generated vendored

@@ -13,7 +13,7 @@ go_library(
    ],
    importpath = "github.com/google/cel-go/checker/decls",
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
    ],
8 vendor/github.com/google/cel-go/checker/decls/decls.go generated vendored

@@ -16,9 +16,9 @@
package decls

import (
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
    emptypb "google.golang.org/protobuf/types/known/emptypb"
    structpb "google.golang.org/protobuf/types/known/structpb"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

var (
@@ -64,6 +64,12 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
            ParameterTypes: paramTypes}}}
}

// NewOptionalType constructs an abstract type indicating that the parameterized type
// may be contained within the object.
func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
    return NewAbstractType("optional", paramType)
}

// NewFunctionType creates a function invocation contract, typically only used
// by type-checking steps after overload resolution.
func NewFunctionType(resultType *exprpb.Type,
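As the new helper shows, `optional` is modeled as a one-parameter abstract type, so `NewOptionalType(decls.String)` is shorthand for `NewAbstractType("optional", decls.String)`. A short sketch:

import (
    "github.com/google/cel-go/checker/decls"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// optionalString declares the checked type optional(string).
var optionalString *exprpb.Type = decls.NewOptionalType(decls.String)

// Equivalent construction through the generic abstract-type helper.
var optionalStringAlt = decls.NewAbstractType("optional", decls.String)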
27 vendor/github.com/google/cel-go/checker/env.go generated vendored

@@ -226,7 +226,7 @@ func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg {
    newOverloads := []*exprpb.Decl_FunctionDecl_Overload{}
    for _, overload := range overloads {
        existing, found := existingOverloads[overload.GetOverloadId()]
        if !found || !proto.Equal(existing, overload) {
        if !found || !overloadsEqual(existing, overload) {
            newOverloads = append(newOverloads, overload)
        }
    }
@@ -264,6 +264,31 @@ func (e *Env) isOverloadDisabled(overloadID string) bool {
    return found
}

// overloadsEqual returns whether two overloads have identical signatures.
//
// type parameter names are ignored as they may be specified in any order and have no bearing on overload
// equivalence
func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool {
    return o1.GetOverloadId() == o2.GetOverloadId() &&
        o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() &&
        paramsEqual(o1.GetParams(), o2.GetParams()) &&
        proto.Equal(o1.GetResultType(), o2.GetResultType())
}

// paramsEqual returns whether two lists have equal length and all types are equal
func paramsEqual(p1, p2 []*exprpb.Type) bool {
    if len(p1) != len(p2) {
        return false
    }
    for i, a := range p1 {
        b := p2[i]
        if !proto.Equal(a, b) {
            return false
        }
    }
    return true
}

// sanitizeFunction replaces well-known types referenced by message name with their equivalent
// CEL built-in type instances.
func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
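`overloadsEqual` compares only the overload ID, the instance-function flag, the parameter types, and the result type, so two declarations that differ only in their type-parameter name lists are treated as the same overload where `proto.Equal` would not. A hedged sketch; the `first_list` overload is hypothetical, and `overloadsEqual` itself is unexported, so the comparison is shown only for illustration:

package main

import (
    "fmt"

    "github.com/google/cel-go/checker/decls"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
    "google.golang.org/protobuf/proto"
)

func main() {
    // Two overloads for a hypothetical `first` function on lists; they differ
    // only in the order of their declared type-parameter names.
    o1 := decls.NewParameterizedOverload("first_list",
        []*exprpb.Type{decls.NewListType(decls.NewTypeParamType("T"))},
        decls.NewTypeParamType("T"), []string{"T", "U"})
    o2 := decls.NewParameterizedOverload("first_list",
        []*exprpb.Type{decls.NewListType(decls.NewTypeParamType("T"))},
        decls.NewTypeParamType("T"), []string{"U", "T"})

    // proto.Equal sees different messages; the overloadsEqual helper above
    // would report the same signature and skip re-registering the overload.
    fmt.Println(proto.Equal(o1, o2)) // false
}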
2 vendor/github.com/google/cel-go/checker/printer.go generated vendored

@@ -26,7 +26,7 @@ type semanticAdorner struct {

var _ debug.Adorner = &semanticAdorner{}

func (a *semanticAdorner) GetMetadata(elem interface{}) string {
func (a *semanticAdorner) GetMetadata(elem any) string {
    result := ""
    e, isExpr := elem.(*exprpb.Expr)
    if !isExpr {
2 vendor/github.com/google/cel-go/checker/standard.go generated vendored

@@ -287,6 +287,8 @@ func init() {
        decls.NewInstanceOverload(overloads.EndsWithString,
            []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
    decls.NewFunction(overloads.Matches,
        decls.NewOverload(overloads.Matches,
            []*exprpb.Type{decls.String, decls.String}, decls.Bool),
        decls.NewInstanceOverload(overloads.MatchesString,
            []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
    decls.NewFunction(overloads.StartsWith,
47 vendor/github.com/google/cel-go/checker/types.go generated vendored

@@ -90,6 +90,14 @@ func FormatCheckedType(t *exprpb.Type) string {
        return "!error!"
    case kindTypeParam:
        return t.GetTypeParam()
    case kindAbstract:
        at := t.GetAbstractType()
        params := at.GetParameterTypes()
        paramStrs := make([]string, len(params))
        for i, p := range params {
            paramStrs[i] = FormatCheckedType(p)
        }
        return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
    }
    return t.String()
}
@@ -110,12 +118,39 @@ func isDyn(t *exprpb.Type) bool {

// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
func isDynOrError(t *exprpb.Type) bool {
    switch kindOf(t) {
    case kindError:
        return true
    default:
        return isDyn(t)
    return isError(t) || isDyn(t)
}

func isError(t *exprpb.Type) bool {
    return kindOf(t) == kindError
}

func isOptional(t *exprpb.Type) bool {
    if kindOf(t) == kindAbstract {
        at := t.GetAbstractType()
        return at.GetName() == "optional"
    }
    return false
}

func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) {
    if isOptional(t) {
        at := t.GetAbstractType()
        return at.GetParameterTypes()[0], true
    }
    return t, false
}

func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
    switch e.GetExprKind().(type) {
    case *exprpb.Expr_ConstExpr:
        literal := e.GetConstExpr()
        switch literal.GetConstantKind().(type) {
        case *exprpb.Constant_StringValue:
            return literal.GetStringValue(), true
        }
    }
    return "", false
}

// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
@@ -236,7 +271,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
// substitution for t1, and whether t2 has a type substitution in mapping m.
//
// The type t2 is a valid substitution for t1 if any of the following statements is true
// - t2 has a type substitition (t2sub) equal to t1
// - t2 has a type substitution (t2sub) equal to t1
// - t2 has a type substitution (t2sub) assignable to t1
// - t2 does not occur within t1.
func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) {
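With the abstract-type branch added to `FormatCheckedType`, optional types render as `name(params)`. A small sketch combining it with the `decls.NewOptionalType` helper from this same change:

import (
    "fmt"

    "github.com/google/cel-go/checker"
    "github.com/google/cel-go/checker/decls"
)

func formatOptionalExample() {
    t := decls.NewOptionalType(decls.NewListType(decls.String))
    // Expected to print: optional(list(string))
    fmt.Println(checker.FormatCheckedType(t))
}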
2 vendor/github.com/google/cel-go/common/BUILD.bazel generated vendored

@@ -17,7 +17,7 @@ go_library(
    importpath = "github.com/google/cel-go/common",
    deps = [
        "//common/runes:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_x_text//width:go_default_library",
    ],
)
4 vendor/github.com/google/cel-go/common/containers/BUILD.bazel generated vendored

@@ -12,7 +12,7 @@ go_library(
    ],
    importpath = "github.com/google/cel-go/common/containers",
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
    ],
)

@@ -26,6 +26,6 @@ go_test(
        ":go_default_library",
    ],
    deps = [
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
    ],
)
2 vendor/github.com/google/cel-go/common/debug/BUILD.bazel generated vendored

@@ -13,6 +13,6 @@ go_library(
    importpath = "github.com/google/cel-go/common/debug",
    deps = [
        "//common:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
    ],
)
14 vendor/github.com/google/cel-go/common/debug/debug.go generated vendored

@@ -29,7 +29,7 @@ import (
// representation of an expression.
type Adorner interface {
    // GetMetadata for the input context.
    GetMetadata(ctx interface{}) string
    GetMetadata(ctx any) string
}

// Writer manages writing expressions to an internal string.
@@ -46,7 +46,7 @@ type emptyDebugAdorner struct {

var emptyAdorner Adorner = &emptyDebugAdorner{}

func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
func (a *emptyDebugAdorner) GetMetadata(e any) string {
    return ""
}

@@ -170,6 +170,9 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
            w.append(",")
            w.appendLine()
        }
        if entry.GetOptionalEntry() {
            w.append("?")
        }
        w.append(entry.GetFieldKey())
        w.append(":")
        w.Buffer(entry.GetValue())
@@ -191,6 +194,9 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
            w.append(",")
            w.appendLine()
        }
        if entry.GetOptionalEntry() {
            w.append("?")
        }
        w.Buffer(entry.GetMapKey())
        w.append(":")
        w.Buffer(entry.GetValue())
@@ -269,7 +275,7 @@ func (w *debugWriter) append(s string) {
    w.buffer.WriteString(s)
}

func (w *debugWriter) appendFormat(f string, args ...interface{}) {
func (w *debugWriter) appendFormat(f string, args ...any) {
    w.append(fmt.Sprintf(f, args...))
}

@@ -280,7 +286,7 @@ func (w *debugWriter) doIndent() {
    }
}

func (w *debugWriter) adorn(e interface{}) {
func (w *debugWriter) adorn(e any) {
    w.append(w.adorner.GetMetadata(e))
}
2 vendor/github.com/google/cel-go/common/errors.go generated vendored

@@ -38,7 +38,7 @@ func NewErrors(source Source) *Errors {
}

// ReportError records an error at a source location.
func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
func (e *Errors) ReportError(l Location, format string, args ...any) {
    e.numErrors++
    if e.numErrors > e.maxErrorsToReport {
        return
4 vendor/github.com/google/cel-go/common/operators/operators.go generated vendored

@@ -37,6 +37,8 @@ const (
    Modulo    = "_%_"
    Negate    = "-_"
    Index     = "_[_]"
    OptIndex  = "_[?_]"
    OptSelect = "_?._"

    // Macros, must have a valid identifier.
    Has = "has"
@@ -99,6 +101,8 @@ var (
    LogicalNot: {displayName: "!", precedence: 2, arity: 1},
    Negate:     {displayName: "-", precedence: 2, arity: 1},
    Index:      {displayName: "", precedence: 1, arity: 2},
    OptIndex:   {displayName: "", precedence: 1, arity: 2},
    OptSelect:  {displayName: "", precedence: 1, arity: 2},
    }
)
10 vendor/github.com/google/cel-go/common/overloads/overloads.go generated vendored

@@ -148,6 +148,11 @@ const (
    StartsWith = "startsWith"
)

// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
const (
    ExtQuoteString = "strings_quote"
)

// String function overload names.
const (
    ContainsString = "contains_string"
@@ -156,6 +161,11 @@ const (
    StartsWithString = "starts_with_string"
)

// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
const (
    ExtFormatString = "string_format"
)

// Time-based functions.
const (
    TimeGetFullYear = "getFullYear"
10 vendor/github.com/google/cel-go/common/types/BUILD.bazel generated vendored

@@ -22,6 +22,7 @@ go_library(
        "map.go",
        "null.go",
        "object.go",
        "optional.go",
        "overflow.go",
        "provider.go",
        "string.go",
@@ -38,10 +39,8 @@ go_library(
        "//common/types/ref:go_default_library",
        "//common/types/traits:go_default_library",
        "@com_github_stoewer_go_strcase//:go_default_library",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
@@ -68,6 +67,7 @@ go_test(
        "map_test.go",
        "null_test.go",
        "object_test.go",
        "optional_test.go",
        "provider_test.go",
        "string_test.go",
        "timestamp_test.go",
@@ -80,7 +80,7 @@ go_test(
        "//common/types/ref:go_default_library",
        "//test:go_default_library",
        "//test/proto3pb:test_all_types_go_proto",
        "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
        "@org_golang_google_protobuf//types/known/anypb:go_default_library",
        "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
9 vendor/github.com/google/cel-go/common/types/bool.go generated vendored

@@ -62,7 +62,7 @@ func (b Bool) Compare(other ref.Val) ref.Val {
}

// ConvertToNative implements the ref.Val interface method.
func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
    switch typeDesc.Kind() {
    case reflect.Bool:
        return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -114,6 +114,11 @@ func (b Bool) Equal(other ref.Val) ref.Val {
    return Bool(ok && b == otherBool)
}

// IsZeroValue returns true if the boolean value is false.
func (b Bool) IsZeroValue() bool {
    return b == False
}

// Negate implements the traits.Negater interface method.
func (b Bool) Negate() ref.Val {
    return !b
@@ -125,7 +130,7 @@ func (b Bool) Type() ref.Type {
}

// Value implements the ref.Val interface method.
func (b Bool) Value() interface{} {
func (b Bool) Value() any {
    return bool(b)
}
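`IsZeroValue` is added uniformly across the scalar types in this change (Bool here, and Bytes, Double, Duration, Int, and others below) so that callers can test for a type-appropriate zero without switching on the Go representation. A small sketch:

import (
    "fmt"

    "github.com/google/cel-go/common/types"
)

func zeroValueExamples() {
    fmt.Println(types.Bool(false).IsZeroValue()) // true
    fmt.Println(types.Bytes(nil).IsZeroValue())  // true
    fmt.Println(types.Double(0.0).IsZeroValue()) // true
    fmt.Println(types.Duration{}.IsZeroValue())  // true
    fmt.Println(types.Int(42).IsZeroValue())     // false
}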
9 vendor/github.com/google/cel-go/common/types/bytes.go generated vendored

@@ -63,7 +63,7 @@ func (b Bytes) Compare(other ref.Val) ref.Val {
}

// ConvertToNative implements the ref.Val interface method.
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
    switch typeDesc.Kind() {
    case reflect.Array, reflect.Slice:
        return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -116,6 +116,11 @@ func (b Bytes) Equal(other ref.Val) ref.Val {
    return Bool(ok && bytes.Equal(b, otherBytes))
}

// IsZeroValue returns true if the byte array is empty.
func (b Bytes) IsZeroValue() bool {
    return len(b) == 0
}

// Size implements the traits.Sizer interface method.
func (b Bytes) Size() ref.Val {
    return Int(len(b))
@@ -127,6 +132,6 @@ func (b Bytes) Type() ref.Type {
}

// Value implements the ref.Val interface method.
func (b Bytes) Value() interface{} {
func (b Bytes) Value() any {
    return []byte(b)
}
13 vendor/github.com/google/cel-go/common/types/double.go generated vendored

@@ -78,7 +78,7 @@ func (d Double) Compare(other ref.Val) ref.Val {
}

// ConvertToNative implements ref.Val.ConvertToNative.
func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
    switch typeDesc.Kind() {
    case reflect.Float32:
        v := float32(d)
@@ -134,13 +134,13 @@ func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
    case IntType:
        i, err := doubleToInt64Checked(float64(d))
        if err != nil {
            return wrapErr(err)
            return WrapErr(err)
        }
        return Int(i)
    case UintType:
        i, err := doubleToUint64Checked(float64(d))
        if err != nil {
            return wrapErr(err)
            return WrapErr(err)
        }
        return Uint(i)
    case DoubleType:
@@ -182,6 +182,11 @@ func (d Double) Equal(other ref.Val) ref.Val {
    }
}

// IsZeroValue returns true if double value is 0.0
func (d Double) IsZeroValue() bool {
    return float64(d) == 0.0
}

// Multiply implements traits.Multiplier.Multiply.
func (d Double) Multiply(other ref.Val) ref.Val {
    otherDouble, ok := other.(Double)
@@ -211,6 +216,6 @@ func (d Double) Type() ref.Type {
}

// Value implements ref.Val.Value.
func (d Double) Value() interface{} {
func (d Double) Value() any {
    return float64(d)
}
17 vendor/github.com/google/cel-go/common/types/duration.go generated vendored

@@ -57,14 +57,14 @@ func (d Duration) Add(other ref.Val) ref.Val {
        dur2 := other.(Duration)
        val, err := addDurationChecked(d.Duration, dur2.Duration)
        if err != nil {
            return wrapErr(err)
            return WrapErr(err)
        }
        return durationOf(val)
    case TimestampType:
        ts := other.(Timestamp).Time
        val, err := addTimeDurationChecked(ts, d.Duration)
        if err != nil {
            return wrapErr(err)
            return WrapErr(err)
        }
        return timestampOf(val)
    }
@@ -90,7 +90,7 @@ func (d Duration) Compare(other ref.Val) ref.Val {
}

// ConvertToNative implements ref.Val.ConvertToNative.
func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
    // If the duration is already assignable to the desired type return it.
    if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
        return d.Duration, nil
@@ -138,11 +138,16 @@ func (d Duration) Equal(other ref.Val) ref.Val {
    return Bool(ok && d.Duration == otherDur.Duration)
}

// IsZeroValue returns true if the duration value is zero
func (d Duration) IsZeroValue() bool {
    return d.Duration == 0
}

// Negate implements traits.Negater.Negate.
func (d Duration) Negate() ref.Val {
    val, err := negateDurationChecked(d.Duration)
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return durationOf(val)
}
@@ -165,7 +170,7 @@ func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
    }
    val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return durationOf(val)
}
@@ -176,7 +181,7 @@ func (d Duration) Type() ref.Type {
}

// Value implements ref.Val.Value.
func (d Duration) Value() interface{} {
func (d Duration) Value() any {
    return d.Duration
}
25 vendor/github.com/google/cel-go/common/types/err.go generated vendored

@@ -22,6 +22,12 @@ import (
    "github.com/google/cel-go/common/types/ref"
)

// Error interface which allows types types.Err values to be treated as error values.
type Error interface {
    error
    ref.Val
}

// Err type which extends the built-in go error and implements ref.Val.
type Err struct {
    error
@@ -51,7 +57,7 @@ var (

// NewErr creates a new Err described by the format string and args.
// TODO: Audit the use of this function and standardize the error messages and codes.
func NewErr(format string, args ...interface{}) ref.Val {
func NewErr(format string, args ...any) ref.Val {
    return &Err{fmt.Errorf(format, args...)}
}

@@ -62,7 +68,7 @@ func NoSuchOverloadErr() ref.Val {

// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
// message that indicates that the native value could not be converted to a CEL ref.Val.
func UnsupportedRefValConversionErr(val interface{}) ref.Val {
func UnsupportedRefValConversionErr(val any) ref.Val {
    return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
}

@@ -74,20 +80,20 @@ func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {

// ValOrErr either returns the existing error or creates a new one.
// TODO: Audit the use of this function and standardize the error messages and codes.
func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
    if val == nil || !IsUnknownOrError(val) {
        return NewErr(format, args...)
    }
    return val
}

// wrapErr wraps an existing Go error value into a CEL Err value.
func wrapErr(err error) ref.Val {
// WrapErr wraps an existing Go error value into a CEL Err value.
func WrapErr(err error) ref.Val {
    return &Err{error: err}
}

// ConvertToNative implements ref.Val.ConvertToNative.
func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
    return nil, e.error
}

@@ -114,10 +120,15 @@ func (e *Err) Type() ref.Type {
}

// Value implements ref.Val.Value.
func (e *Err) Value() interface{} {
func (e *Err) Value() any {
    return e.error
}

// Is implements errors.Is.
func (e *Err) Is(target error) bool {
    return e.error.Error() == target.Error()
}

// IsError returns whether the input element ref.Type or ref.Val is equal to
// the ErrType singleton.
func IsError(val ref.Val) bool {
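The newly exported `WrapErr`, the `Error` interface, and `Err.Is` let callers round-trip a native Go error through a CEL `ref.Val`. A hedged sketch:

import (
    "errors"
    "fmt"

    "github.com/google/cel-go/common/types"
)

func wrapErrExample() {
    cause := errors.New("index out of range")

    // WrapErr (formerly the unexported wrapErr) lifts a Go error into a ref.Val.
    val := types.WrapErr(cause)
    fmt.Println(types.IsError(val)) // true

    // The Error interface combines error and ref.Val; Err.Is compares messages,
    // so errors.Is matches the original cause.
    if celErr, ok := val.(types.Error); ok {
        fmt.Println(errors.Is(celErr, cause)) // true
    }
}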
23 vendor/github.com/google/cel-go/common/types/int.go generated vendored

@@ -66,7 +66,7 @@ func (i Int) Add(other ref.Val) ref.Val {
    }
    val, err := addInt64Checked(int64(i), int64(otherInt))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -89,7 +89,7 @@ func (i Int) Compare(other ref.Val) ref.Val {
}

// ConvertToNative implements ref.Val.ConvertToNative.
func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
    switch typeDesc.Kind() {
    case reflect.Int, reflect.Int32:
        // Enums are also mapped as int32 derivations.
@@ -176,7 +176,7 @@ func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
    case UintType:
        u, err := int64ToUint64Checked(int64(i))
        if err != nil {
            return wrapErr(err)
            return WrapErr(err)
        }
        return Uint(u)
    case DoubleType:
@@ -204,7 +204,7 @@ func (i Int) Divide(other ref.Val) ref.Val {
    }
    val, err := divideInt64Checked(int64(i), int64(otherInt))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -226,6 +226,11 @@ func (i Int) Equal(other ref.Val) ref.Val {
    }
}

// IsZeroValue returns true if integer is equal to 0
func (i Int) IsZeroValue() bool {
    return i == IntZero
}

// Modulo implements traits.Modder.Modulo.
func (i Int) Modulo(other ref.Val) ref.Val {
    otherInt, ok := other.(Int)
@@ -234,7 +239,7 @@ func (i Int) Modulo(other ref.Val) ref.Val {
    }
    val, err := moduloInt64Checked(int64(i), int64(otherInt))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -247,7 +252,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
    }
    val, err := multiplyInt64Checked(int64(i), int64(otherInt))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -256,7 +261,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
func (i Int) Negate() ref.Val {
    val, err := negateInt64Checked(int64(i))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -269,7 +274,7 @@ func (i Int) Subtract(subtrahend ref.Val) ref.Val {
    }
    val, err := subtractInt64Checked(int64(i), int64(subtraInt))
    if err != nil {
        return wrapErr(err)
        return WrapErr(err)
    }
    return Int(val)
}
@@ -280,7 +285,7 @@ func (i Int) Type() ref.Type {
}

// Value implements ref.Val.Value.
func (i Int) Value() interface{} {
func (i Int) Value() any {
    return int64(i)
}
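The `Int` arithmetic above relies on the checked helpers in overflow.go and now surfaces failures through the exported `WrapErr`. A small sketch of the observable behavior:

import (
    "fmt"
    "math"

    "github.com/google/cel-go/common/types"
)

func overflowExample() {
    // Adding past the int64 range yields a CEL error value rather than wrapping around.
    sum := types.Int(math.MaxInt64).Add(types.Int(1))
    fmt.Println(types.IsError(sum)) // true

    // In-range arithmetic returns a plain Int.
    fmt.Println(types.Int(2).Multiply(types.Int(3))) // 6
}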
Some files were not shown because too many files have changed in this diff.