diff --git a/runtime/Go/antlr/v4/array_prediction_context.go b/runtime/Go/antlr/v4/array_prediction_context.go deleted file mode 100644 index 9ad7eceb66..0000000000 --- a/runtime/Go/antlr/v4/array_prediction_context.go +++ /dev/null @@ -1,115 +0,0 @@ -package antlr - -import ( - "golang.org/x/exp/slices" - "strconv" -) - -type ArrayPredictionContext struct { - BasePredictionContext - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - hash = murmurFinish(hash, len(parents)<<1) - - return &ArrayPredictionContext{ - BasePredictionContext: BasePredictionContext{ - cachedHash: hash, - pcType: PredictionContextArray, - }, - parents: parents, - returnStates: returnStates, - } -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o Collectable[PredictionContext]) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // TODO: The hash hashes in all the return states anyway, to we maybe don't need to compare them here? 
- return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} diff --git a/runtime/Go/antlr/v4/atn_config.go b/runtime/Go/antlr/v4/atn_config.go index ecb251e278..e09b761a57 100644 --- a/runtime/Go/antlr/v4/atn_config.go +++ b/runtime/Go/antlr/v4/atn_config.go @@ -29,9 +29,9 @@ type ATNConfig interface { GetSemanticContext() SemanticContext // GetContext returns the rule invocation stack associated with this configuration - GetContext() PredictionContext + GetContext() *PredictionContext // SetContext sets the rule invocation stack associated with this configuration - SetContext(PredictionContext) + SetContext(*PredictionContext) // GetReachesIntoOuterContext returns the count of references to an outer context from this configuration GetReachesIntoOuterContext() int @@ -52,7 +52,7 @@ type BaseATNConfig struct { precedenceFilterSuppressed bool state ATNState alt int - context PredictionContext + context *PredictionContext semanticContext SemanticContext reachesIntoOuterContext int } @@ -69,12 +69,12 @@ func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - maybe dele } // NewBaseATNConfig6 creates a new BaseATNConfig instance given a state, alt and context only -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { +func NewBaseATNConfig6(state ATNState, alt int, context *PredictionContext) *BaseATNConfig { return NewBaseATNConfig5(state, alt, context, SemanticContextNone) } // NewBaseATNConfig5 creates a new BaseATNConfig instance given a state, alt, context and semantic context -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { +func NewBaseATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *BaseATNConfig { if semanticContext == nil { panic("semanticContext cannot be nil") // TODO: Necessary? } @@ -98,13 +98,13 @@ func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNCon } // NewBaseATNConfig1 creates a new BaseATNConfig instance given an existing config, a state, and a context only -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { +func NewBaseATNConfig1(c ATNConfig, state ATNState, context *PredictionContext) *BaseATNConfig { return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) } // NewBaseATNConfig creates a new BaseATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' // are just wrappers around this one. 
-func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { +func NewBaseATNConfig(c ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *BaseATNConfig { if semanticContext == nil { panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed } @@ -115,7 +115,7 @@ func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, se return b } -func (b *BaseATNConfig) InitBaseATNConfig(c ATNConfig, state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) { +func (b *BaseATNConfig) InitBaseATNConfig(c ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { b.state = state b.alt = alt @@ -144,12 +144,12 @@ func (b *BaseATNConfig) GetAlt() int { } // SetContext sets the rule invocation stack associated with this configuration -func (b *BaseATNConfig) SetContext(v PredictionContext) { +func (b *BaseATNConfig) SetContext(v *PredictionContext) { b.context = v } // GetContext returns the rule invocation stack associated with this configuration -func (b *BaseATNConfig) GetContext() PredictionContext { +func (b *BaseATNConfig) GetContext() *PredictionContext { return b.context } @@ -248,7 +248,7 @@ type LexerATNConfig struct { passedThroughNonGreedyDecision bool } -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *LexerATNConfig { return &LexerATNConfig{ BaseATNConfig: BaseATNConfig{ @@ -260,7 +260,7 @@ func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *Lex } } -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { +func NewLexerATNConfig5(state ATNState, alt int, context *PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { return &LexerATNConfig{ BaseATNConfig: BaseATNConfig{ state: state, @@ -291,7 +291,7 @@ func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor * return lac } -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { +func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context *PredictionContext) *LexerATNConfig { lac := &LexerATNConfig{ lexerActionExecutor: c.lexerActionExecutor, passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), @@ -301,7 +301,7 @@ func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionCon } //goland:noinspection GoUnusedExportedFunction -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *LexerATNConfig { lac := &LexerATNConfig{ BaseATNConfig: BaseATNConfig{ state: state, diff --git a/runtime/Go/antlr/v4/atn_simulator.go b/runtime/Go/antlr/v4/atn_simulator.go index 38facd56df..e26be67199 100644 --- a/runtime/Go/antlr/v4/atn_simulator.go +++ b/runtime/Go/antlr/v4/atn_simulator.go @@ -18,12 +18,13 @@ type BaseATNSimulator struct { decisionToDFA []*DFA } -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { if b.sharedContextCache == nil { return context } - visited := NewJStore[PredictionContext, 
Comparator[PredictionContext]](pContextEqInst) + // TODO: Should this be guarded by a mutex? + visited := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst) return getCachedBasePredictionContext(context, b.sharedContextCache, visited) } diff --git a/runtime/Go/antlr/v4/atn_state.go b/runtime/Go/antlr/v4/atn_state.go index 58dec925cd..d854ef1a84 100644 --- a/runtime/Go/antlr/v4/atn_state.go +++ b/runtime/Go/antlr/v4/atn_state.go @@ -78,9 +78,9 @@ type BaseATNState struct { transitions []Transition } -//func NewBaseATNState() *BaseATNState { -// return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -//} +func NewBaseATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} func (as *BaseATNState) GetRuleIndex() int { return as.ruleIndex diff --git a/runtime/Go/antlr/v4/base_prediction_context.go b/runtime/Go/antlr/v4/base_prediction_context.go deleted file mode 100644 index bbca66e40e..0000000000 --- a/runtime/Go/antlr/v4/base_prediction_context.go +++ /dev/null @@ -1,45 +0,0 @@ -package antlr - -// BasePredictionContext is the 'abstract class' for all prediction contexts and does not exist -// in its own right. All actual [PredictionContext] structs embed this and then provide their -// own methods to implement functionality. -type BasePredictionContext struct { - cachedHash int - pcType int -} - -func (b *BasePredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BasePredictionContext) Equals(_ Collectable[PredictionContext]) bool { - return false -} - -func (b *BasePredictionContext) GetParent(i int) PredictionContext { - return nil -} - -func (b *BasePredictionContext) getReturnState(i int) int { - return 0 -} - -func (b *BasePredictionContext) length() int { - return 0 -} - -func (b *BasePredictionContext) hasEmptyPath() bool { - return b.getReturnState(b.length()-1) == BasePredictionContextEmptyReturnState -} - -func (b *BasePredictionContext) String() string { - return "empty prediction context" -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func (b *BasePredictionContext) Type() int { - return b.pcType -} diff --git a/runtime/Go/antlr/v4/char_stream.go b/runtime/Go/antlr/v4/char_stream.go index c33f0adb5e..bd8127b6b5 100644 --- a/runtime/Go/antlr/v4/char_stream.go +++ b/runtime/Go/antlr/v4/char_stream.go @@ -8,5 +8,5 @@ type CharStream interface { IntStream GetText(int, int) string GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string } diff --git a/runtime/Go/antlr/v4/common_token_stream.go b/runtime/Go/antlr/v4/common_token_stream.go index 2e85776fd8..96f53e6aca 100644 --- a/runtime/Go/antlr/v4/common_token_stream.go +++ b/runtime/Go/antlr/v4/common_token_stream.go @@ -27,14 +27,14 @@ type CommonTokenStream struct { // fetch: The check to prevent adding multiple EOF symbols into tokens is // trivial with bt field. fetchedEOF bool - + // index into [tokens] of the current token (next token to consume). // tokens[p] should be LT(1). It is set to -1 when the stream is first // constructed or when SetTokenSource is called, indicating that the first token // has not yet been fetched from the token source. For additional information, // see the documentation of [IntStream] for a description of initializing methods. index int - + // tokenSource is the [TokenSource] from which tokens for the bt stream are // fetched. 
tokenSource TokenSource @@ -246,8 +246,8 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) from := tokenIndex + 1 - -// If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token + + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token var to int if nextOnChannel == -1 { @@ -317,7 +317,8 @@ func (c *CommonTokenStream) Index() int { } func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) } func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { @@ -332,15 +333,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string return c.GetTextFromInterval(interval.GetSourceInterval()) } -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } + c.Sync(interval.Stop) start := interval.Start stop := interval.Stop diff --git a/runtime/Go/antlr/v4/comparators.go b/runtime/Go/antlr/v4/comparators.go index 96cb7b06f7..da5a116b40 100644 --- a/runtime/Go/antlr/v4/comparators.go +++ b/runtime/Go/antlr/v4/comparators.go @@ -29,7 +29,7 @@ var ( dfaStateEqInst = &ObjEqComparator[*DFAState]{} semctxEqInst = &ObjEqComparator[SemanticContext]{} atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{} - pContextEqInst = &ObjEqComparator[PredictionContext]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} ) // Equals2 delegates to the Equals() method of type T diff --git a/runtime/Go/antlr/v4/empty_prediction_context.go b/runtime/Go/antlr/v4/empty_prediction_context.go deleted file mode 100644 index c4d336275e..0000000000 --- a/runtime/Go/antlr/v4/empty_prediction_context.go +++ /dev/null @@ -1,56 +0,0 @@ -package antlr - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -type EmptyPredictionContext struct { - BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - return &EmptyPredictionContext{ - BaseSingletonPredictionContext: BaseSingletonPredictionContext{ - BasePredictionContext: BasePredictionContext{ - cachedHash: calculateEmptyHash(), - pcType: PredictionContextEmpty, - }, - parentCtx: nil, - returnState: BasePredictionContextEmptyReturnState, - }, - } -} -func (e *EmptyPredictionContext) length() int { - return 1 -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(_ int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(_ int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other Collectable[PredictionContext]) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} diff --git a/runtime/Go/antlr/v4/input_stream.go b/runtime/Go/antlr/v4/input_stream.go index 9b100fd3a0..c02bbc76d3 100644 --- a/runtime/Go/antlr/v4/input_stream.go +++ 
b/runtime/Go/antlr/v4/input_stream.go @@ -111,7 +111,7 @@ func (is *InputStream) GetTextFromTokens(start, stop Token) string { return "" } -func (is *InputStream) GetTextFromInterval(i *Interval) string { +func (is *InputStream) GetTextFromInterval(i Interval) string { return is.GetText(i.Start, i.Stop) } diff --git a/runtime/Go/antlr/v4/interval_set.go b/runtime/Go/antlr/v4/interval_set.go index 96f1a8b15e..649338ba33 100644 --- a/runtime/Go/antlr/v4/interval_set.go +++ b/runtime/Go/antlr/v4/interval_set.go @@ -15,21 +15,20 @@ type Interval struct { } // NewInterval creates a new interval with the given start and stop values. -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } } // Contains returns true if the given item is contained within the interval. -func (i *Interval) Contains(item int) bool { +func (i Interval) Contains(item int) bool { return item >= i.Start && item < i.Stop } // String generates a string representation of the interval. -func (i *Interval) String() string { +func (i Interval) String() string { if i.Start == i.Stop-1 { return strconv.Itoa(i.Start) } @@ -38,13 +37,13 @@ func (i *Interval) String() string { } // Length returns the length of the interval. -func (i *Interval) Length() int { +func (i Interval) Length() int { return i.Stop - i.Start } // IntervalSet represents a collection of [Intervals], which may be read-only. type IntervalSet struct { - intervals []*Interval + intervals []Interval readOnly bool } @@ -89,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) { i.addInterval(NewInterval(l, h+1)) } -func (i *IntervalSet) addInterval(v *Interval) { +func (i *IntervalSet) addInterval(v Interval) { if i.intervals == nil { - i.intervals = make([]*Interval, 0) + i.intervals = make([]Interval, 0) i.intervals = append(i.intervals, v) } else { // find insert pos for k, interval := range i.intervals { // distinct range -> insert if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) return } else if v.Stop == interval.Start { i.intervals[k].Start = v.Start @@ -159,15 +158,15 @@ func (i *IntervalSet) contains(item int) bool { func (i *IntervalSet) length() int { iLen := 0 - + for _, v := range i.intervals { iLen += v.Length() } - + return iLen } -func (i *IntervalSet) removeRange(v *Interval) { +func (i *IntervalSet) removeRange(v Interval) { if v.Start == v.Stop-1 { i.removeOne(v.Start) } else if i.intervals != nil { @@ -181,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) { i.intervals[k] = NewInterval(ni.Start, v.Start) x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) @@ -218,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) { x := NewInterval(ki.Start, v) ki.Start = v + 1 // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) 
return } } @@ -242,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } -func (i *IntervalSet) GetIntervals() []*Interval { +func (i *IntervalSet) GetIntervals() []Interval { return i.intervals } diff --git a/runtime/Go/antlr/v4/lexer_atn_simulator.go b/runtime/Go/antlr/v4/lexer_atn_simulator.go index 2a16341999..bca6444c5d 100644 --- a/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/runtime/Go/antlr/v4/lexer_atn_simulator.go @@ -122,7 +122,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) @@ -135,7 +135,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { next := l.addDFAState(s0Closure, suppressEdge) predict := l.execATN(input, next) - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) @@ -144,7 +144,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { } func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) @@ -289,10 +289,10 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { continue } - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { - + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } @@ -358,7 +358,7 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord // The func returns true if an accept state is reached. func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { fmt.Println("closure(" + config.String() + ")") @@ -366,7 +366,7 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co _, ok := config.state.(*RuleStopState) if ok { - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { if l.recog != nil { @@ -448,7 +448,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // test them, we cannot cash the DFA state target of ID. 
pt := trans.(*PredicateTransition) - + if //goland:noinspection GoBoolExpressions LexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) diff --git a/runtime/Go/antlr/v4/ll1_analyzer.go b/runtime/Go/antlr/v4/ll1_analyzer.go index 4b46396eff..0b86272bfd 100644 --- a/runtime/Go/antlr/v4/ll1_analyzer.go +++ b/runtime/Go/antlr/v4/ll1_analyzer.go @@ -38,11 +38,11 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { count := len(s.GetTransitions()) look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { - + look[alt] = NewIntervalSet() lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) - + // Wipe out lookahead for la alternative if we found nothing, // or we had a predicate when we !seeThruPreds if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { @@ -71,7 +71,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { // specified ctx. func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() - var lookContext PredictionContext + var lookContext *PredictionContext if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } @@ -109,25 +109,25 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. -func (la *LL1Analyzer) look2(_, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - + } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + c := NewBaseATNConfig6(s, 0, ctx) - + if lookBusy.Contains(c) { return } - + _, present := lookBusy.Put(c) if present { return - + } if s == stopState { if ctx == nil { @@ -138,9 +138,9 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } } - + _, ok := s.(*RuleStopState) - + if ok { if ctx == nil { look.addOne(TokenEpsilon) @@ -149,8 +149,8 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look look.addOne(TokenEOF) return } - - if ctx != BasePredictionContextEMPTY { + + if ctx.pcType != PredictionContextEmpty { removed := calledRuleStack.contains(s.GetRuleIndex()) defer func() { if removed { @@ -166,17 +166,17 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } } - + n := len(s.GetTransitions()) - + for i := 0; i < n; i++ { t := s.GetTransitions()[i] - + if t1, ok := t.(*RuleTransition); ok { if 
calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { continue } - + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) } else if t2, ok := t.(AbstractPredicateTransition); ok { @@ -201,15 +201,15 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - + defer func() { calledRuleStack.remove(t1.getTarget().GetRuleIndex()) }() - + calledRuleStack.add(t1.getTarget().GetRuleIndex()) la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - + } diff --git a/runtime/Go/antlr/v4/parser_atn_simulator.go b/runtime/Go/antlr/v4/parser_atn_simulator.go index db9e3ebfc8..bd047da056 100644 --- a/runtime/Go/antlr/v4/parser_atn_simulator.go +++ b/runtime/Go/antlr/v4/parser_atn_simulator.go @@ -732,7 +732,7 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full // calling [Parser].getPrecedence). func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - statesFromAlt1 := make(map[int]PredictionContext) + statesFromAlt1 := make(map[int]*PredictionContext) configSet := NewBaseATNConfigSet(configs.FullContext()) for _, config := range configs.GetItems() { diff --git a/runtime/Go/antlr/v4/parser_rule_context.go b/runtime/Go/antlr/v4/parser_rule_context.go index 2e37e1f022..38d524a9c7 100644 --- a/runtime/Go/antlr/v4/parser_rule_context.go +++ b/runtime/Go/antlr/v4/parser_rule_context.go @@ -11,28 +11,28 @@ import ( type ParserRuleContext interface { RuleContext - + SetException(RecognitionException) - + AddTokenNode(token Token) *TerminalNodeImpl AddErrorNode(badToken Token) *ErrorNodeImpl - + EnterRule(listener ParseTreeListener) ExitRule(listener ParseTreeListener) - + SetStart(Token) GetStart() Token - + SetStop(Token) GetStop() Token - + AddChild(child RuleContext) RuleContext RemoveLastChild() } type BaseParserRuleContext struct { *BaseRuleContext - + start, stop Token exception RecognitionException children []Tree @@ -40,9 +40,9 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) - + prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - + prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, // we need to track all of the tokens and rule invocations associated @@ -56,7 +56,7 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. 
prc.exception = nil - + return prc } @@ -81,12 +81,12 @@ func (prc *BaseParserRuleContext) GetText() string { if prc.GetChildCount() == 0 { return "" } - + var s string for _, child := range prc.children { s += child.(ParseTree).GetText() } - + return s } @@ -131,12 +131,12 @@ func (prc *BaseParserRuleContext) RemoveLastChild() { } func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - + node := NewTerminalNodeImpl(token) prc.addTerminalNodeChild(node) node.parentCtx = prc return node - + } func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { @@ -150,7 +150,7 @@ func (prc *BaseParserRuleContext) GetChild(i int) Tree { if prc.children != nil && len(prc.children) >= i { return prc.children[i] } - + return nil } @@ -158,18 +158,18 @@ func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) if childType == nil { return prc.GetChild(i).(RuleContext) } - + for j := 0; j < len(prc.children); j++ { child := prc.children[j] if reflect.TypeOf(child) == childType { if i == 0 { return child.(RuleContext) } - + i-- } } - + return nil } @@ -202,7 +202,7 @@ func (prc *BaseParserRuleContext) GetStop() Token { } func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - + for j := 0; j < len(prc.children); j++ { child := prc.children[j] if c2, ok := child.(TerminalNode); ok { @@ -210,7 +210,7 @@ func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { if i == 0 { return c2 } - + i-- } } @@ -222,9 +222,9 @@ func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { if prc.children == nil { return make([]TerminalNode, 0) } - + tokens := make([]TerminalNode, 0) - + for j := 0; j < len(prc.children); j++ { child := prc.children[j] if tchild, ok := child.(TerminalNode); ok { @@ -233,7 +233,7 @@ func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { } } } - + return tokens } @@ -245,12 +245,12 @@ func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleCont if prc.children == nil || i < 0 || i >= len(prc.children) { return nil } - + j := -1 // what element have we found with ctxType? 
for _, o := range prc.children { - + childType := reflect.TypeOf(o) - + if childType.Implements(ctxType) { j++ if j == i { @@ -272,12 +272,12 @@ func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []R if prc.children == nil { return make([]RuleContext, 0) } - + contexts := make([]RuleContext, 0) - + for _, child := range prc.children { childType := reflect.TypeOf(child) - + if childType.ConvertibleTo(ctxType) { contexts = append(contexts, child.(RuleContext)) } @@ -289,15 +289,15 @@ func (prc *BaseParserRuleContext) GetChildCount() int { if prc.children == nil { return 0 } - + return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } - + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) } @@ -308,7 +308,7 @@ func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { // func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - + var p ParserRuleContext = prc s := "[" for p != nil && p != stop { @@ -352,12 +352,12 @@ type BaseInterpreterRuleContext struct { //goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - + prc := new(BaseInterpreterRuleContext) - + prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - + prc.RuleIndex = ruleIndex - + return prc } diff --git a/runtime/Go/antlr/v4/prediction_context.go b/runtime/Go/antlr/v4/prediction_context.go index 40c53c9b48..87f9e0bab9 100644 --- a/runtime/Go/antlr/v4/prediction_context.go +++ b/runtime/Go/antlr/v4/prediction_context.go @@ -6,19 +6,19 @@ package antlr import ( "fmt" + "golang.org/x/exp/slices" + "strconv" ) -// PredictionContext defines the interface that must be implemented by any flavor of prediction context. -type PredictionContext interface { - Hash() int - Equals(collectable Collectable[PredictionContext]) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string - Type() int +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash } const ( @@ -46,7 +46,243 @@ const ( PredictionContextArray ) -func calculateHash(parent PredictionContext, returnState int) int { +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. 
+type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + return &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, + } +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{ + pcType: PredictionContextSingleton, + returnState: returnState, + parentCtx: parent, + } + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + return &PredictionContext{ + cachedHash: hash, + pcType: PredictionContextArray, + parents: parents, + returnStates: returnStates, + } +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + return other == nil || other.(*PredictionContext).isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func 
(p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { h := murmurInit(1) h = murmurUpdate(h, parent.Hash()) h = murmurUpdate(h, returnState) @@ -56,7 +292,7 @@ func calculateHash(parent PredictionContext, returnState int) int { // Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. // Return {@link //EMPTY} if {@code outerContext} is empty or nil. // / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { if outerContext == nil { outerContext = ParserRuleContextEmpty } @@ -73,7 +309,7 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) } -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) *PredictionContext { // Share same graph if both same // @@ -81,17 +317,8 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) return a } - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. 
- // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) } // At least one of a or b is array // If one is $ and rootIsWildcard, return $ as wildcard @@ -105,26 +332,25 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) } // Convert either Singleton or Empty to arrays, so that we can merge them - var ara, arb *ArrayPredictionContext - - ara = convertToArray(a) - arb = convertToArray(b) + // + ara := convertToArray(a) + arb := convertToArray(b) return mergeArrays(ara, arb, rootIsWildcard, mergeCache) } -func convertToArray(pc PredictionContext) *ArrayPredictionContext { +func convertToArray(pc *PredictionContext) *PredictionContext { switch pc.Type() { case PredictionContextEmpty: - return NewArrayPredictionContext([]PredictionContext{}, []int{}) + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) case PredictionContextSingleton: - return NewArrayPredictionContext([]PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) default: // Already an array } - return pc.(*ArrayPredictionContext) + return pc } -// mergeSingletons merges two [SingletonBasePredictionContext] instances. +// mergeSingletons merges two Singleton [PredictionContext] instances. // // Stack tops equal, parents merge is same return left graph. //
// //goland:noinspection GoBoolExpressions -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) *PredictionContext { if mergeCache != nil { previous := mergeCache.Get(a.Hash(), b.Hash()) if previous != nil { if ParserATNSimulatorTraceATNSim { fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") } - return previous.(PredictionContext) + return previous.(*PredictionContext) } previous = mergeCache.Get(b.Hash(), a.Hash()) if previous != nil { if ParserATNSimulatorTraceATNSim { fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") } - return previous.(PredictionContext) + return previous.(*PredictionContext) } } // merge sorted payloads a + b => M @@ -336,7 +562,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * k := 0 // walks target M array mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) // walk and merge to yield mergedParents, mergedReturnStates for i < len(a.returnStates) && j < len(b.returnStates) { aParent := a.parents[i] @@ -346,7 +572,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * payload := a.returnStates[i] // $+$ = $ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax // -> // ax if bothDollars || axAX { @@ -430,24 +656,24 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * return M } -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) +// Make pass over all M parents and merge any Equals() ones. +// Note: This is not used in Go as we are not using pointers in the slice anyway, but I have kept it for reference +// and if we ever need to use pointers in the slice. 
+//goland:noinspection GoUnusedFunction +func combineCommonParents(parents []*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst) for p := 0; p < len(parents); p++ { parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } + _, _ = uniqueParents.Put(parent) } for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] + pc, _ := uniqueParents.Get(parents[q]) + parents[q] = pc } } -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited *JStore[PredictionContext, Comparator[PredictionContext]]) PredictionContext { +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *JStore[*PredictionContext, Comparator[*PredictionContext]]) *PredictionContext { if context.isEmpty() { return context @@ -462,12 +688,12 @@ func getCachedBasePredictionContext(context PredictionContext, contextCache *Pre return existing } changed := false - parents := make([]PredictionContext, context.length()) + parents := make([]*PredictionContext, context.length()) for i := 0; i < len(parents); i++ { parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) if changed || !parent.Equals(context.GetParent(i)) { if !changed { - parents = make([]PredictionContext, context.length()) + parents = make([]*PredictionContext, context.length()) for j := 0; j < context.length(); j++ { parents[j] = context.GetParent(j) } @@ -481,13 +707,13 @@ func getCachedBasePredictionContext(context PredictionContext, contextCache *Pre _, _ = visited.Put(context) return context } - var updated PredictionContext + var updated *PredictionContext if len(parents) == 0 { updated = BasePredictionContextEMPTY } else if len(parents) == 1 { updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) } contextCache.add(updated) visited.Put(updated) diff --git a/runtime/Go/antlr/v4/prediction_context_cache.go b/runtime/Go/antlr/v4/prediction_context_cache.go index d2520566a9..2e4390acf6 100644 --- a/runtime/Go/antlr/v4/prediction_context_cache.go +++ b/runtime/Go/antlr/v4/prediction_context_cache.go @@ -6,20 +6,19 @@ var BasePredictionContextEMPTY = NewEmptyPredictionContext() // context cash associated with contexts in DFA states. This cache // can be used for both lexers and parsers. type PredictionContextCache struct { - //cache map[PredictionContext]PredictionContext - cache *JStore[PredictionContext, Comparator[PredictionContext]] + cache *JStore[*PredictionContext, Comparator[*PredictionContext]] } func NewPredictionContextCache() *PredictionContextCache { return &PredictionContextCache{ - cache: NewJStore[PredictionContext, Comparator[PredictionContext]](pContextEqInst), + cache: NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst), } } // Add a context to the cache and return it. If the context already exists, // return that one instead and do not add a new context to the cache. // Protect shared cache from unsafe thread access. 
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { if ctx.isEmpty() { return BasePredictionContextEMPTY } @@ -31,7 +30,7 @@ func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { return pc } -func (p *PredictionContextCache) Get(ctx PredictionContext) (PredictionContext, bool) { +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { pc, exists := p.cache.Get(ctx) return pc, exists } diff --git a/runtime/Go/antlr/v4/singleton_prediction_context.go b/runtime/Go/antlr/v4/singleton_prediction_context.go deleted file mode 100644 index 15d4a1644e..0000000000 --- a/runtime/Go/antlr/v4/singleton_prediction_context.go +++ /dev/null @@ -1,104 +0,0 @@ -package antlr - -import "strconv" - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - BasePredictionContext - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) PredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - return &BaseSingletonPredictionContext{ - BasePredictionContext: BasePredictionContext{ - cachedHash: cachedHash, - pcType: PredictionContextSingleton, - }, - parentCtx: parent, - returnState: returnState, - } -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(_ int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(_ int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other Collectable[PredictionContext]) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.cachedHash != otherP.Hash() { - return false // Can't be same if hash is different - } - - if b.returnState != otherP.getReturnState(0) { - return false - } - - // Both parents must be nil if one is - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} diff --git a/runtime/Go/antlr/v4/token_stream.go b/runtime/Go/antlr/v4/token_stream.go index 1527d43f60..d516cf36bd 100644 --- a/runtime/Go/antlr/v4/token_stream.go +++ b/runtime/Go/antlr/v4/token_stream.go @@ -14,7 +14,7 @@ type TokenStream interface 
{ SetTokenSource(TokenSource) GetAllText() string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string } diff --git a/runtime/Go/antlr/v4/tokenstream_rewriter.go b/runtime/Go/antlr/v4/tokenstream_rewriter.go index 4c60056d08..9d0c97283a 100644 --- a/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ b/runtime/Go/antlr/v4/tokenstream_rewriter.go @@ -94,7 +94,7 @@ const ( // Define the rewrite operation hierarchy type RewriteOperation interface { - + // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. Execute(buffer *bytes.Buffer) int @@ -441,7 +441,7 @@ func (tsr *TokenStreamRewriter) GetTextDefault() string { // GetText returns the text from the original tokens altered per the // instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetText(programName string, interval *Interval) string { +func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { rewrites := tsr.programs[programName] start := interval.Start stop := interval.Stop diff --git a/runtime/Go/antlr/v4/tree.go b/runtime/Go/antlr/v4/tree.go index b9abb89d3c..9f882ba154 100644 --- a/runtime/Go/antlr/v4/tree.go +++ b/runtime/Go/antlr/v4/tree.go @@ -21,16 +21,13 @@ type Tree interface { type SyntaxTree interface { Tree - - GetSourceInterval() *Interval + GetSourceInterval() Interval } type ParseTree interface { SyntaxTree - Accept(Visitor ParseTreeVisitor) interface{} GetText() string - ToStringTree([]string, Recognizer) string } @@ -43,7 +40,6 @@ type RuleNode interface { type TerminalNode interface { ParseTree - GetSymbol() Token } @@ -108,8 +104,7 @@ func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} type TerminalNodeImpl struct { parentCtx RuleContext - - symbol Token + symbol Token } var _ TerminalNode = &TerminalNodeImpl{} @@ -151,7 +146,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} { return t.symbol } -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { +func (t *TerminalNodeImpl) GetSourceInterval() Interval { if t.symbol == nil { return TreeInvalidInterval }
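The bulk of this diff replaces the old PredictionContext interface and its BasePredictionContext / SingletonPredictionContext / ArrayPredictionContext / EmptyPredictionContext implementations with a single *PredictionContext struct whose shape is selected by the pcType field. The self-contained sketch below shows that pattern in miniature; the names (ctxKind, predCtx, newSingleton, and so on) are illustrative stand-ins rather than the runtime's API, and the real struct additionally carries a cached hash and the constructors shown in prediction_context.go.

package main

import "fmt"

// Kind tags play the role of pcType in the real runtime.
type ctxKind int

const (
	kindEmpty ctxKind = iota
	kindSingleton
	kindArray
)

// Stand-in for BasePredictionContextEmptyReturnState.
const emptyReturnState = 0x7FFFFFFF

// predCtx is one struct covering all three shapes, like the new PredictionContext.
type predCtx struct {
	kind         ctxKind
	parent       *predCtx   // singleton parent
	returnState  int        // singleton return state
	parents      []*predCtx // array parents
	returnStates []int      // array return states
}

func newEmpty() *predCtx { return &predCtx{kind: kindEmpty, returnState: emptyReturnState} }

func newSingleton(parent *predCtx, returnState int) *predCtx {
	return &predCtx{kind: kindSingleton, parent: parent, returnState: returnState}
}

func newArray(parents []*predCtx, returnStates []int) *predCtx {
	return &predCtx{kind: kindArray, parents: parents, returnStates: returnStates}
}

// length dispatches on the kind field instead of on a dynamic type.
func (p *predCtx) length() int {
	if p.kind == kindArray {
		return len(p.returnStates)
	}
	return 1
}

func (p *predCtx) getReturnState(i int) int {
	if p.kind == kindArray {
		return p.returnStates[i]
	}
	return p.returnState
}

func (p *predCtx) getParent(i int) *predCtx {
	switch p.kind {
	case kindSingleton:
		return p.parent
	case kindArray:
		return p.parents[i]
	}
	return nil
}

func main() {
	empty := newEmpty()
	s := newSingleton(empty, 7)
	a := newArray([]*predCtx{empty, s}, []int{3, 7})
	fmt.Println(empty.length(), s.getReturnState(0), a.length(), a.getParent(1) == s)
}

Collapsing the hierarchy this way trades interface dispatch and Java-style embedding for a switch on pcType inside methods such as GetParent, getReturnState, length, and String, which is what the new prediction_context.go does.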
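Equality on the new struct is guarded by a hash computed once at construction (murmur hashing in the runtime) and checked before any structural walk, and mergeArrays now compares parents with Equals rather than pointer identity, since two distinct *PredictionContext values can describe the same stack. A rough sketch of that hash-then-structure pattern follows, with a toy mixer standing in for murmur and illustrative names throughout.

package main

import "fmt"

// node caches its hash at construction time, like PredictionContext.cachedHash.
type node struct {
	returnStates []int
	cachedHash   int
}

// mix is a toy stand-in for the murmur hashing the runtime uses.
func mix(h, v int) int { return h*31 + v }

func newNode(returnStates []int) *node {
	h := 1
	for _, s := range returnStates {
		h = mix(h, s)
	}
	return &node{returnStates: returnStates, cachedHash: h}
}

func (n *node) Hash() int { return n.cachedHash }

// Equals short-circuits on the cached hash, then falls back to a structural
// comparison of the payload — two distinct pointers can still be equal.
func (n *node) Equals(o *node) bool {
	if n == o {
		return true
	}
	if o == nil || n.cachedHash != o.cachedHash {
		return false
	}
	if len(n.returnStates) != len(o.returnStates) {
		return false
	}
	for i := range n.returnStates {
		if n.returnStates[i] != o.returnStates[i] {
			return false
		}
	}
	return true
}

func main() {
	a := newNode([]int{3, 7, 11})
	b := newNode([]int{3, 7, 11})
	fmt.Println(a == b, a.Equals(b)) // false true: identity differs, value equality holds
}

The same reasoning drives keeping pContextEqInst as an ObjEqComparator[*PredictionContext]: JStore-based caches key on Hash/Equals, not on pointer identity.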
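Interval likewise changes from *Interval to a plain value everywhere: NewInterval returns Interval, IntervalSet stores []Interval, GetTextFromInterval takes an Interval value, and GetAllText builds one explicitly with Fill plus NewInterval(0, len(c.tokens)-1) instead of passing nil. The sketch below only restates the value semantics the new code relies on, copying the two-field type and its Contains check for illustration: element updates must go through the slice index, while a range copy never writes back.

package main

import "fmt"

// Interval mirrors the runtime's value type: two ints, cheap to copy.
type Interval struct {
	Start, Stop int
}

func NewInterval(start, stop int) Interval {
	return Interval{Start: start, Stop: stop}
}

// Contains treats Stop as exclusive, matching the runtime's check.
func (i Interval) Contains(item int) bool {
	return item >= i.Start && item < i.Stop
}

func main() {
	intervals := []Interval{NewInterval(0, 3), NewInterval(5, 9)}

	// Writing through the index mutates the element in place, which is how
	// IntervalSet adjusts interval bounds after this change.
	intervals[0].Stop = 4

	// A range variable is a copy of the element; this assignment is lost.
	for _, iv := range intervals {
		iv.Start = 100
	}

	fmt.Println(intervals)                // [{0 4} {5 9}]
	fmt.Println(intervals[1].Contains(9)) // false: Stop is exclusive
}

Because an Interval is just two ints, passing it by value avoids a heap allocation per interval and removes the nil case that GetTextFromInterval previously had to special-case.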