Commit 8d08ef21 authored by Kateřina Sloupová's avatar Kateřina Sloupová
Browse files

Added regular expression support: a parser and a transformation to EFA.

parent ef4e5e17
Pipeline #56516 failed with stage
in 24 seconds
grammar REG_grammar;

/* Parser Rules */

// Anchor the parse at end of input: an optional expression followed by EOF.
// The previous form `(expr | <EOF>)` is not valid ANTLR4 syntax (EOF is
// written without angle brackets), and without the EOF anchor any trailing
// garbage after a valid expression would be silently accepted.
start : expr? EOF;

expr : concatenated+        // implicit concatenation by juxtaposition
     | expr CONCAT expr     // explicit '.' concatenation
     | expr UNION expr      // '+' union (binds loosest)
     ;

concatenated: (symbol | (iterable (ITER | POS_ITER)) | parentheses);
iterable: (symbol | parentheses);
parentheses: LEFT_PAR expr RIGHT_PAR;
symbol : (ALPHABET | EPSILON | EMPTYSET);

/* Lexer Rules */

/* Tokens */
LEFT_PAR : '(';
RIGHT_PAR: ')';
ITER : ('*' | '^*');
POS_ITER : '^+';
CONCAT : '.';
UNION : '+';
ALPHABET : [a-zA-Z0-9];
EPSILON : 'ε';
EMPTYSET : '∅';

/* Characters to be ignored */
WS : [ \r\t\n]+ -> skip ;
token literal names:
null
'('
')'
null
'^+'
'.'
'+'
null
'ε'
'∅'
null
token symbolic names:
null
LEFT_PAR
RIGHT_PAR
ITER
POS_ITER
CONCAT
UNION
ALPHABET
EPSILON
EMPTYSET
WS
rule names:
start
expr
concatenated
iterable
parentheses
symbol
atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 12, 53, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 3, 2, 3, 2, 5, 2, 17, 10, 2, 3, 3, 3, 3, 6, 3, 21, 10, 3, 13, 3, 14, 3, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 7, 3, 31, 10, 3, 12, 3, 14, 3, 34, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 41, 10, 4, 3, 5, 3, 5, 5, 5, 45, 10, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 2, 3, 4, 8, 2, 4, 6, 8, 10, 12, 2, 4, 3, 2, 5, 6, 3, 2, 9, 11, 2, 53, 2, 16, 3, 2, 2, 2, 4, 18, 3, 2, 2, 2, 6, 40, 3, 2, 2, 2, 8, 44, 3, 2, 2, 2, 10, 46, 3, 2, 2, 2, 12, 50, 3, 2, 2, 2, 14, 17, 5, 4, 3, 2, 15, 17, 3, 2, 2, 2, 16, 14, 3, 2, 2, 2, 16, 15, 3, 2, 2, 2, 17, 3, 3, 2, 2, 2, 18, 20, 8, 3, 1, 2, 19, 21, 5, 6, 4, 2, 20, 19, 3, 2, 2, 2, 21, 22, 3, 2, 2, 2, 22, 20, 3, 2, 2, 2, 22, 23, 3, 2, 2, 2, 23, 32, 3, 2, 2, 2, 24, 25, 12, 4, 2, 2, 25, 26, 7, 7, 2, 2, 26, 31, 5, 4, 3, 5, 27, 28, 12, 3, 2, 2, 28, 29, 7, 8, 2, 2, 29, 31, 5, 4, 3, 4, 30, 24, 3, 2, 2, 2, 30, 27, 3, 2, 2, 2, 31, 34, 3, 2, 2, 2, 32, 30, 3, 2, 2, 2, 32, 33, 3, 2, 2, 2, 33, 5, 3, 2, 2, 2, 34, 32, 3, 2, 2, 2, 35, 41, 5, 12, 7, 2, 36, 37, 5, 8, 5, 2, 37, 38, 9, 2, 2, 2, 38, 41, 3, 2, 2, 2, 39, 41, 5, 10, 6, 2, 40, 35, 3, 2, 2, 2, 40, 36, 3, 2, 2, 2, 40, 39, 3, 2, 2, 2, 41, 7, 3, 2, 2, 2, 42, 45, 5, 12, 7, 2, 43, 45, 5, 10, 6, 2, 44, 42, 3, 2, 2, 2, 44, 43, 3, 2, 2, 2, 45, 9, 3, 2, 2, 2, 46, 47, 7, 3, 2, 2, 47, 48, 5, 4, 3, 2, 48, 49, 7, 4, 2, 2, 49, 11, 3, 2, 2, 2, 50, 51, 9, 3, 2, 2, 51, 13, 3, 2, 2, 2, 8, 16, 22, 30, 32, 40, 44]
\ No newline at end of file
LEFT_PAR=1
RIGHT_PAR=2
ITER=3
POS_ITER=4
CONCAT=5
UNION=6
ALPHABET=7
EPSILON=8
EMPTYSET=9
WS=10
'('=1
')'=2
'^+'=4
'.'=5
'+'=6
'ε'=8
'∅'=9
token literal names:
null
'('
')'
null
'^+'
'.'
'+'
null
'ε'
'∅'
null
token symbolic names:
null
LEFT_PAR
RIGHT_PAR
ITER
POS_ITER
CONCAT
UNION
ALPHABET
EPSILON
EMPTYSET
WS
rule names:
LEFT_PAR
RIGHT_PAR
ITER
POS_ITER
CONCAT
UNION
ALPHABET
EPSILON
EMPTYSET
WS
channel names:
DEFAULT_TOKEN_CHANNEL
HIDDEN
mode names:
DEFAULT_MODE
atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 12, 52, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 5, 4, 31, 10, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 6, 11, 47, 10, 11, 13, 11, 14, 11, 48, 3, 11, 3, 11, 2, 2, 12, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 3, 2, 4, 5, 2, 50, 59, 67, 92, 99, 124, 5, 2, 11, 12, 15, 15, 34, 34, 2, 53, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 3, 23, 3, 2, 2, 2, 5, 25, 3, 2, 2, 2, 7, 30, 3, 2, 2, 2, 9, 32, 3, 2, 2, 2, 11, 35, 3, 2, 2, 2, 13, 37, 3, 2, 2, 2, 15, 39, 3, 2, 2, 2, 17, 41, 3, 2, 2, 2, 19, 43, 3, 2, 2, 2, 21, 46, 3, 2, 2, 2, 23, 24, 7, 42, 2, 2, 24, 4, 3, 2, 2, 2, 25, 26, 7, 43, 2, 2, 26, 6, 3, 2, 2, 2, 27, 31, 7, 44, 2, 2, 28, 29, 7, 96, 2, 2, 29, 31, 7, 44, 2, 2, 30, 27, 3, 2, 2, 2, 30, 28, 3, 2, 2, 2, 31, 8, 3, 2, 2, 2, 32, 33, 7, 96, 2, 2, 33, 34, 7, 45, 2, 2, 34, 10, 3, 2, 2, 2, 35, 36, 7, 48, 2, 2, 36, 12, 3, 2, 2, 2, 37, 38, 7, 45, 2, 2, 38, 14, 3, 2, 2, 2, 39, 40, 9, 2, 2, 2, 40, 16, 3, 2, 2, 2, 41, 42, 7, 951, 2, 2, 42, 18, 3, 2, 2, 2, 43, 44, 7, 8711, 2, 2, 44, 20, 3, 2, 2, 2, 45, 47, 9, 3, 2, 2, 46, 45, 3, 2, 2, 2, 47, 48, 3, 2, 2, 2, 48, 46, 3, 2, 2, 2, 48, 49, 3, 2, 2, 2, 49, 50, 3, 2, 2, 2, 50, 51, 8, 11, 2, 2, 51, 22, 3, 2, 2, 2, 5, 2, 30, 48, 3, 8, 2, 2]
\ No newline at end of file
# Generated from REG_grammar.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
import sys

# `typing.io` is deprecated and removed in Python 3.13; import TextIO from
# `typing` directly on modern interpreters. This mirrors the guard the
# generated parser module already uses, keeping the two files consistent.
if sys.version_info[1] > 5:
    from typing import TextIO
else:
    from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (Augmented Transition Network) for the
    REG_grammar lexer as a single string.

    The payload is machine-generated by ANTLR 4.8 from REG_grammar.g4 and is
    decoded by ATNDeserializer at class-definition time; do not edit the
    escape sequences by hand — regenerate with ANTLR instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f")
        buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write("\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\3\3\3\3")
        buf.write("\4\3\4\3\4\5\4\37\n\4\3\5\3\5\3\5\3\6\3\6\3\7\3\7\3\b")
        buf.write("\3\b\3\t\3\t\3\n\3\n\3\13\6\13/\n\13\r\13\16\13\60\3\13")
        buf.write("\3\13\2\2\f\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25")
        buf.write("\f\3\2\4\5\2\62;C\\c|\5\2\13\f\17\17\"\"\2\65\2\3\3\2")
        buf.write("\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2")
        buf.write("\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2")
        buf.write("\25\3\2\2\2\3\27\3\2\2\2\5\31\3\2\2\2\7\36\3\2\2\2\t ")
        buf.write("\3\2\2\2\13#\3\2\2\2\r%\3\2\2\2\17\'\3\2\2\2\21)\3\2\2")
        buf.write("\2\23+\3\2\2\2\25.\3\2\2\2\27\30\7*\2\2\30\4\3\2\2\2\31")
        buf.write("\32\7+\2\2\32\6\3\2\2\2\33\37\7,\2\2\34\35\7`\2\2\35\37")
        buf.write("\7,\2\2\36\33\3\2\2\2\36\34\3\2\2\2\37\b\3\2\2\2 !\7`")
        buf.write("\2\2!\"\7-\2\2\"\n\3\2\2\2#$\7\60\2\2$\f\3\2\2\2%&\7-")
        buf.write("\2\2&\16\3\2\2\2\'(\t\2\2\2(\20\3\2\2\2)*\7\u03b7\2\2")
        buf.write("*\22\3\2\2\2+,\7\u2207\2\2,\24\3\2\2\2-/\t\3\2\2.-\3\2")
        buf.write("\2\2/\60\3\2\2\2\60.\3\2\2\2\60\61\3\2\2\2\61\62\3\2\2")
        buf.write("\2\62\63\b\13\2\2\63\26\3\2\2\2\5\2\36\60\3\b\2\2")
        return buf.getvalue()
class REG_grammarLexer(Lexer):
    """ANTLR 4.8 generated lexer for REG_grammar.g4 (regular expressions)."""

    # Deserialized once at class-definition time and shared by all instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants — must stay in sync with REG_grammar.tokens.
    LEFT_PAR = 1
    RIGHT_PAR = 2
    ITER = 3
    POS_ITER = 4
    CONCAT = 5
    UNION = 6
    ALPHABET = 7
    EPSILON = 8
    EMPTYSET = 9
    WS = 10

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # NOTE(review): this list is not padded with placeholders for tokens that
    # have no fixed literal (ITER, ALPHABET, WS) — presumably harmless because
    # the runtime only uses it for display; verify against the .tokens file.
    literalNames = [ "<INVALID>",
            "'('", "')'", "'^+'", "'.'", "'+'", "'\u03B5'", "'\u2205'" ]

    symbolicNames = [ "<INVALID>",
            "LEFT_PAR", "RIGHT_PAR", "ITER", "POS_ITER", "CONCAT", "UNION",
            "ALPHABET", "EPSILON", "EMPTYSET", "WS" ]

    ruleNames = [ "LEFT_PAR", "RIGHT_PAR", "ITER", "POS_ITER", "CONCAT",
                  "UNION", "ALPHABET", "EPSILON", "EMPTYSET", "WS" ]

    grammarFileName = "REG_grammar.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Build a lexer over `input` (a CharStream, or None to set later)."""
        super().__init__(input, output)
        self.checkVersion("4.8")
        # The simulator shares the class-level DFA cache across instances.
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
LEFT_PAR=1
RIGHT_PAR=2
ITER=3
POS_ITER=4
CONCAT=5
UNION=6
ALPHABET=7
EPSILON=8
EMPTYSET=9
WS=10
'('=1
')'=2
'^+'=4
'.'=5
'+'=6
'ε'=8
'∅'=9
# Generated from REG_grammar.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .REG_grammarParser import REG_grammarParser
else:
from REG_grammarParser import REG_grammarParser
# This class defines a complete listener for a parse tree produced by REG_grammarParser.
class REG_grammarListener(ParseTreeListener):
    """Complete no-op listener for parse trees produced by REG_grammarParser.

    Subclass this and override only the callbacks you need; every method
    here deliberately does nothing.
    """

    def enterStart(self, ctx:REG_grammarParser.StartContext):
        """Called when the parser enters a `start` rule."""

    def exitStart(self, ctx:REG_grammarParser.StartContext):
        """Called when the parser leaves a `start` rule."""

    def enterExpr(self, ctx:REG_grammarParser.ExprContext):
        """Called when the parser enters an `expr` rule."""

    def exitExpr(self, ctx:REG_grammarParser.ExprContext):
        """Called when the parser leaves an `expr` rule."""

    def enterConcatenated(self, ctx:REG_grammarParser.ConcatenatedContext):
        """Called when the parser enters a `concatenated` rule."""

    def exitConcatenated(self, ctx:REG_grammarParser.ConcatenatedContext):
        """Called when the parser leaves a `concatenated` rule."""

    def enterIterable(self, ctx:REG_grammarParser.IterableContext):
        """Called when the parser enters an `iterable` rule."""

    def exitIterable(self, ctx:REG_grammarParser.IterableContext):
        """Called when the parser leaves an `iterable` rule."""

    def enterParentheses(self, ctx:REG_grammarParser.ParenthesesContext):
        """Called when the parser enters a `parentheses` rule."""

    def exitParentheses(self, ctx:REG_grammarParser.ParenthesesContext):
        """Called when the parser leaves a `parentheses` rule."""

    def enterSymbol(self, ctx:REG_grammarParser.SymbolContext):
        """Called when the parser enters a `symbol` rule."""

    def exitSymbol(self, ctx:REG_grammarParser.SymbolContext):
        """Called when the parser leaves a `symbol` rule."""


# The parser class was only needed for the annotations above.
del REG_grammarParser
\ No newline at end of file
# Generated from REG_grammar.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (Augmented Transition Network) for the
    REG_grammar parser as a single string.

    Machine-generated by ANTLR 4.8 from REG_grammar.g4 and decoded by
    ATNDeserializer; do not edit the escape sequences by hand — regenerate
    with ANTLR instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\f")
        buf.write("\65\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\3")
        buf.write("\2\3\2\5\2\21\n\2\3\3\3\3\6\3\25\n\3\r\3\16\3\26\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\7\3\37\n\3\f\3\16\3\"\13\3\3\4\3\4")
        buf.write("\3\4\3\4\3\4\5\4)\n\4\3\5\3\5\5\5-\n\5\3\6\3\6\3\6\3\6")
        buf.write("\3\7\3\7\3\7\2\3\4\b\2\4\6\b\n\f\2\4\3\2\5\6\3\2\t\13")
        buf.write("\2\65\2\20\3\2\2\2\4\22\3\2\2\2\6(\3\2\2\2\b,\3\2\2\2")
        buf.write("\n.\3\2\2\2\f\62\3\2\2\2\16\21\5\4\3\2\17\21\3\2\2\2\20")
        buf.write("\16\3\2\2\2\20\17\3\2\2\2\21\3\3\2\2\2\22\24\b\3\1\2\23")
        buf.write("\25\5\6\4\2\24\23\3\2\2\2\25\26\3\2\2\2\26\24\3\2\2\2")
        buf.write("\26\27\3\2\2\2\27 \3\2\2\2\30\31\f\4\2\2\31\32\7\7\2\2")
        buf.write("\32\37\5\4\3\5\33\34\f\3\2\2\34\35\7\b\2\2\35\37\5\4\3")
        buf.write("\4\36\30\3\2\2\2\36\33\3\2\2\2\37\"\3\2\2\2 \36\3\2\2")
        buf.write("\2 !\3\2\2\2!\5\3\2\2\2\" \3\2\2\2#)\5\f\7\2$%\5\b\5\2")
        buf.write("%&\t\2\2\2&)\3\2\2\2\')\5\n\6\2(#\3\2\2\2($\3\2\2\2(\'")
        buf.write("\3\2\2\2)\7\3\2\2\2*-\5\f\7\2+-\5\n\6\2,*\3\2\2\2,+\3")
        buf.write("\2\2\2-\t\3\2\2\2./\7\3\2\2/\60\5\4\3\2\60\61\7\4\2\2")
        buf.write("\61\13\3\2\2\2\62\63\t\3\2\2\63\r\3\2\2\2\b\20\26\36 ")
        buf.write("(,")
        return buf.getvalue()
class REG_grammarParser ( Parser ):
    """ANTLR 4.8 generated parser for REG_grammar.g4 (regular expressions)."""

    grammarFileName = "REG_grammar.g4"

    # Deserialized once at class-definition time and shared by all instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Literal spellings indexed by token type; "<INVALID>" marks tokens with
    # no single fixed literal (e.g. ITER matches both '*' and '^*').
    literalNames = [ "<INVALID>", "'('", "')'", "<INVALID>", "'^+'", "'.'",
                     "'+'", "<INVALID>", "'\u03B5'", "'\u2205'" ]

    symbolicNames = [ "<INVALID>", "LEFT_PAR", "RIGHT_PAR", "ITER", "POS_ITER",
                      "CONCAT", "UNION", "ALPHABET", "EPSILON", "EMPTYSET",
                      "WS" ]

    # Rule indexes (order matches ruleNames below).
    RULE_start = 0
    RULE_expr = 1
    RULE_concatenated = 2
    RULE_iterable = 3
    RULE_parentheses = 4
    RULE_symbol = 5

    ruleNames = [ "start", "expr", "concatenated", "iterable", "parentheses",
                  "symbol" ]

    EOF = Token.EOF

    # Token type constants — must stay in sync with the lexer and the
    # REG_grammar.tokens file.
    LEFT_PAR=1
    RIGHT_PAR=2
    ITER=3
    POS_ITER=4
    CONCAT=5
    UNION=6
    ALPHABET=7
    EPSILON=8
    EMPTYSET=9
    WS=10
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
    """Build a parser over `input`, a token stream (from REG_grammarLexer)."""
    super().__init__(input, output)
    self.checkVersion("4.8")
    # Adaptive LL(*) simulator; DFAs and context cache are shared at class
    # level across all parser instances.
    self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
    self._predicates = None
class StartContext(ParserRuleContext):
    """Parse-tree context produced by the `start` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expr(self):
        """The child `expr` subtree (None when the input was empty)."""
        return self.getTypedRuleContext(REG_grammarParser.ExprContext, 0)

    def getRuleIndex(self):
        return REG_grammarParser.RULE_start

    def enterRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "enterStart", None)
        if callback is not None:
            callback(self)

    def exitRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "exitStart", None)
        if callback is not None:
            callback(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitStart", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def start(self):
    """Parse the `start` rule: either a full `expr` or empty input (EOF).

    Returns the StartContext. On a syntax error the exception is attached
    to the context and the standard error handler reports and recovers.
    """
    localctx = REG_grammarParser.StartContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_start)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 14
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # One token of lookahead decides the alternative: any token that can
        # begin an expression selects `expr`; EOF selects the empty
        # alternative; anything else is a syntax error.
        if token in [REG_grammarParser.LEFT_PAR, REG_grammarParser.ALPHABET, REG_grammarParser.EPSILON, REG_grammarParser.EMPTYSET]:
            self.state = 12
            self.expr(0)
            pass
        elif token in [REG_grammarParser.EOF]:
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExprContext(ParserRuleContext):
    """Parse-tree context produced by the left-recursive `expr` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def concatenated(self, i:int=None):
        """The i-th `concatenated` child, or the list of all of them when i is None."""
        if i is None:
            return self.getTypedRuleContexts(REG_grammarParser.ConcatenatedContext)
        return self.getTypedRuleContext(REG_grammarParser.ConcatenatedContext, i)

    def expr(self, i:int=None):
        """The i-th nested `expr` child, or the list of all of them when i is None."""
        if i is None:
            return self.getTypedRuleContexts(REG_grammarParser.ExprContext)
        return self.getTypedRuleContext(REG_grammarParser.ExprContext, i)

    def CONCAT(self):
        return self.getToken(REG_grammarParser.CONCAT, 0)

    def UNION(self):
        return self.getToken(REG_grammarParser.UNION, 0)

    def getRuleIndex(self):
        return REG_grammarParser.RULE_expr

    def enterRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "enterExpr", None)
        if callback is not None:
            callback(self)

    def exitRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "exitExpr", None)
        if callback is not None:
            callback(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitExpr", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def expr(self, _p:int=0):
    """Parse the left-recursive `expr` rule.

    ANTLR rewrites the left recursion into a loop: first match one or more
    `concatenated` operands, then repeatedly extend the expression with
    CONCAT ('.') or UNION ('+') operators while precedence predicates allow
    it. `_p` is the minimum precedence required to keep extending.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = REG_grammarParser.ExprContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 2
    self.enterRecursionRule(localctx, 2, self.RULE_expr, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 18
        self._errHandler.sync(self)
        # Positive closure: match `concatenated` at least once.
        _alt = 1
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt == 1:
                self.state = 17
                self.concatenated()
            else:
                raise NoViableAltException(self)
            self.state = 20
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,1,self._ctx)
        self._ctx.stop = self._input.LT(-1)
        self.state = 30
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,3,self._ctx)
        # Left-recursion loop: keep extending with '.' or '+' while the
        # adaptive prediction says another operator follows.
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 28
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
                if la_ == 1:
                    # expr CONCAT expr — '.' binds tighter than '+'.
                    localctx = REG_grammarParser.ExprContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
                    self.state = 22
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 23
                    self.match(REG_grammarParser.CONCAT)
                    self.state = 24
                    self.expr(3)
                    pass

                elif la_ == 2:
                    # expr UNION expr — lowest precedence.
                    localctx = REG_grammarParser.ExprContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
                    self.state = 25
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 26
                    self.match(REG_grammarParser.UNION)
                    self.state = 27
                    self.expr(2)
                    pass

            self.state = 32
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,3,self._ctx)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class ConcatenatedContext(ParserRuleContext):
    """Parse-tree context produced by the `concatenated` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(REG_grammarParser.SymbolContext, 0)

    def parentheses(self):
        return self.getTypedRuleContext(REG_grammarParser.ParenthesesContext, 0)

    def iterable(self):
        return self.getTypedRuleContext(REG_grammarParser.IterableContext, 0)

    def ITER(self):
        return self.getToken(REG_grammarParser.ITER, 0)

    def POS_ITER(self):
        return self.getToken(REG_grammarParser.POS_ITER, 0)

    def getRuleIndex(self):
        return REG_grammarParser.RULE_concatenated

    def enterRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "enterConcatenated", None)
        if callback is not None:
            callback(self)

    def exitRule(self, listener:ParseTreeListener):
        callback = getattr(listener, "exitConcatenated", None)
        if callback is not None:
            callback(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitConcatenated", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def concatenated(self):
localctx = REG_grammarParser.ConcatenatedContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_concatenated)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 38