Commit 0e509fd4 authored by Kateřina Sloupová
Browse files

add NFA parser

parent 41b36c4f
Pipeline #54653 failed with stage
in 21 seconds
// ANTLR4 grammar for a textual NFA description of the form:
//   init=q0
//   (q0,a)={q1,q2}
//   final={q1}
grammar NFA_grammar;

/* Parser Rules */

// A description is the initial state, any number of transitions, then the final states.
start: init production* final;

// Initial-state declaration, e.g. "init=q0".
init: INIT EQUALS statename;

// One transition: (source state, input symbol) = set of target states.
// NOTE(review): both parenthesised items are parsed as `statename`, so the
// input symbol shares STATE's [a-zA-Z0-9]+ alphabet — confirm that is intended.
production: LEFT_PARENTHESIS statename COMMA statename RIGHT_PARENTHESIS EQUALS stateset;

// A possibly empty, comma-separated, brace-delimited set of states,
// e.g. "{q1,q2}" or "{}" (the empty alternative allows "{}").
stateset: LEFT_BRACKET (statename (COMMA statename)* | ) RIGHT_BRACKET;

// Final-states declaration, e.g. "final={q1}" or "F={q1}".
final: FINAL EQUALS stateset;

// A single state name.
statename: STATE;

/* Lexer Rules */

/* Tokens */
INIT : 'init';
EQUALS : '=';
LEFT_PARENTHESIS : '(';
RIGHT_PARENTHESIS : ')';
LEFT_BRACKET : '{';
RIGHT_BRACKET : '}';
COMMA : ',';
FINAL : ('final' | 'F');
STATE : [a-zA-Z0-9]+;

/* Characters to be ignored */
WS : [ \r\t\n]+ -> skip ;
token literal names:
null
'init'
'='
'('
')'
'{'
'}'
','
null
null
null
token symbolic names:
null
INIT
EQUALS
LEFT_PARENTHESIS
RIGHT_PARENTHESIS
LEFT_BRACKET
RIGHT_BRACKET
COMMA
FINAL
STATE
WS
rule names:
start
init
production
stateset
final
statename
atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 12, 56, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 3, 2, 3, 2, 7, 2, 17, 10, 2, 12, 2, 14, 2, 20, 11, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 40, 10, 5, 12, 5, 14, 5, 43, 11, 5, 3, 5, 5, 5, 46, 10, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 2, 2, 8, 2, 4, 6, 8, 10, 12, 2, 2, 2, 52, 2, 14, 3, 2, 2, 2, 4, 23, 3, 2, 2, 2, 6, 27, 3, 2, 2, 2, 8, 35, 3, 2, 2, 2, 10, 49, 3, 2, 2, 2, 12, 53, 3, 2, 2, 2, 14, 18, 5, 4, 3, 2, 15, 17, 5, 6, 4, 2, 16, 15, 3, 2, 2, 2, 17, 20, 3, 2, 2, 2, 18, 16, 3, 2, 2, 2, 18, 19, 3, 2, 2, 2, 19, 21, 3, 2, 2, 2, 20, 18, 3, 2, 2, 2, 21, 22, 5, 10, 6, 2, 22, 3, 3, 2, 2, 2, 23, 24, 7, 3, 2, 2, 24, 25, 7, 4, 2, 2, 25, 26, 5, 12, 7, 2, 26, 5, 3, 2, 2, 2, 27, 28, 7, 5, 2, 2, 28, 29, 5, 12, 7, 2, 29, 30, 7, 9, 2, 2, 30, 31, 5, 12, 7, 2, 31, 32, 7, 6, 2, 2, 32, 33, 7, 4, 2, 2, 33, 34, 5, 8, 5, 2, 34, 7, 3, 2, 2, 2, 35, 45, 7, 7, 2, 2, 36, 41, 5, 12, 7, 2, 37, 38, 7, 9, 2, 2, 38, 40, 5, 12, 7, 2, 39, 37, 3, 2, 2, 2, 40, 43, 3, 2, 2, 2, 41, 39, 3, 2, 2, 2, 41, 42, 3, 2, 2, 2, 42, 46, 3, 2, 2, 2, 43, 41, 3, 2, 2, 2, 44, 46, 3, 2, 2, 2, 45, 36, 3, 2, 2, 2, 45, 44, 3, 2, 2, 2, 46, 47, 3, 2, 2, 2, 47, 48, 7, 8, 2, 2, 48, 9, 3, 2, 2, 2, 49, 50, 7, 10, 2, 2, 50, 51, 7, 4, 2, 2, 51, 52, 5, 8, 5, 2, 52, 11, 3, 2, 2, 2, 53, 54, 7, 11, 2, 2, 54, 13, 3, 2, 2, 2, 5, 18, 41, 45]
\ No newline at end of file
INIT=1
EQUALS=2
LEFT_PARENTHESIS=3
RIGHT_PARENTHESIS=4
LEFT_BRACKET=5
RIGHT_BRACKET=6
COMMA=7
FINAL=8
STATE=9
WS=10
'init'=1
'='=2
'('=3
')'=4
'{'=5
'}'=6
','=7
token literal names:
null
'init'
'='
'('
')'
'{'
'}'
','
null
null
null
token symbolic names:
null
INIT
EQUALS
LEFT_PARENTHESIS
RIGHT_PARENTHESIS
LEFT_BRACKET
RIGHT_BRACKET
COMMA
FINAL
STATE
WS
rule names:
INIT
EQUALS
LEFT_PARENTHESIS
RIGHT_PARENTHESIS
LEFT_BRACKET
RIGHT_BRACKET
COMMA
FINAL
STATE
WS
channel names:
DEFAULT_TOKEN_CHANNEL
HIDDEN
mode names:
DEFAULT_MODE
atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 12, 60, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 47, 10, 9, 3, 10, 6, 10, 50, 10, 10, 13, 10, 14, 10, 51, 3, 11, 6, 11, 55, 10, 11, 13, 11, 14, 11, 56, 3, 11, 3, 11, 2, 2, 12, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 3, 2, 4, 5, 2, 50, 59, 67, 92, 99, 124, 5, 2, 11, 12, 15, 15, 34, 34, 2, 62, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 3, 23, 3, 2, 2, 2, 5, 28, 3, 2, 2, 2, 7, 30, 3, 2, 2, 2, 9, 32, 3, 2, 2, 2, 11, 34, 3, 2, 2, 2, 13, 36, 3, 2, 2, 2, 15, 38, 3, 2, 2, 2, 17, 46, 3, 2, 2, 2, 19, 49, 3, 2, 2, 2, 21, 54, 3, 2, 2, 2, 23, 24, 7, 107, 2, 2, 24, 25, 7, 112, 2, 2, 25, 26, 7, 107, 2, 2, 26, 27, 7, 118, 2, 2, 27, 4, 3, 2, 2, 2, 28, 29, 7, 63, 2, 2, 29, 6, 3, 2, 2, 2, 30, 31, 7, 42, 2, 2, 31, 8, 3, 2, 2, 2, 32, 33, 7, 43, 2, 2, 33, 10, 3, 2, 2, 2, 34, 35, 7, 125, 2, 2, 35, 12, 3, 2, 2, 2, 36, 37, 7, 127, 2, 2, 37, 14, 3, 2, 2, 2, 38, 39, 7, 46, 2, 2, 39, 16, 3, 2, 2, 2, 40, 41, 7, 104, 2, 2, 41, 42, 7, 107, 2, 2, 42, 43, 7, 112, 2, 2, 43, 44, 7, 99, 2, 2, 44, 47, 7, 110, 2, 2, 45, 47, 7, 72, 2, 2, 46, 40, 3, 2, 2, 2, 46, 45, 3, 2, 2, 2, 47, 18, 3, 2, 2, 2, 48, 50, 9, 2, 2, 2, 49, 48, 3, 2, 2, 2, 50, 51, 3, 2, 2, 2, 51, 49, 3, 2, 2, 2, 51, 52, 3, 2, 2, 2, 52, 20, 3, 2, 2, 2, 53, 55, 9, 3, 2, 2, 54, 53, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 54, 3, 2, 2, 2, 56, 57, 3, 2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 59, 8, 11, 2, 2, 59, 22, 3, 2, 2, 2, 6, 2, 46, 51, 56, 3, 8, 2, 2]
\ No newline at end of file
# Generated from NFA_grammar.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
import sys
# `typing.io` was deprecated for years and removed in Python 3.13; import
# TextIO from `typing` on modern interpreters and fall back only on very old
# ones. This mirrors the guard ANTLR emits in the generated parser module.
if sys.version_info[1] > 5:
    from typing import TextIO
else:
    from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (augmented transition network) for the
    NFA_grammar lexer.

    The payload below is machine-generated by ANTLR 4.8 from
    NFA_grammar.g4 — do not edit it by hand; regenerate instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f")
        buf.write("<\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\2\3\2\3\2")
        buf.write("\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3")
        buf.write("\t\3\t\3\t\3\t\3\t\5\t/\n\t\3\n\6\n\62\n\n\r\n\16\n\63")
        buf.write("\3\13\6\13\67\n\13\r\13\16\138\3\13\3\13\2\2\f\3\3\5\4")
        buf.write("\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\3\2\4\5\2\62;C")
        buf.write("\\c|\5\2\13\f\17\17\"\"\2>\2\3\3\2\2\2\2\5\3\2\2\2\2\7")
        buf.write("\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2")
        buf.write("\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\3\27\3\2\2")
        buf.write("\2\5\34\3\2\2\2\7\36\3\2\2\2\t \3\2\2\2\13\"\3\2\2\2\r")
        buf.write("$\3\2\2\2\17&\3\2\2\2\21.\3\2\2\2\23\61\3\2\2\2\25\66")
        buf.write("\3\2\2\2\27\30\7k\2\2\30\31\7p\2\2\31\32\7k\2\2\32\33")
        buf.write("\7v\2\2\33\4\3\2\2\2\34\35\7?\2\2\35\6\3\2\2\2\36\37\7")
        buf.write("*\2\2\37\b\3\2\2\2 !\7+\2\2!\n\3\2\2\2\"#\7}\2\2#\f\3")
        buf.write("\2\2\2$%\7\177\2\2%\16\3\2\2\2&\'\7.\2\2\'\20\3\2\2\2")
        buf.write("()\7h\2\2)*\7k\2\2*+\7p\2\2+,\7c\2\2,/\7n\2\2-/\7H\2\2")
        buf.write(".(\3\2\2\2.-\3\2\2\2/\22\3\2\2\2\60\62\t\2\2\2\61\60\3")
        buf.write("\2\2\2\62\63\3\2\2\2\63\61\3\2\2\2\63\64\3\2\2\2\64\24")
        buf.write("\3\2\2\2\65\67\t\3\2\2\66\65\3\2\2\2\678\3\2\2\28\66\3")
        buf.write("\2\2\289\3\2\2\29:\3\2\2\2:;\b\13\2\2;\26\3\2\2\2\6\2")
        buf.write(".\638\3\b\2\2")
        return buf.getvalue()
class NFA_grammarLexer(Lexer):
    """Lexer generated by ANTLR 4.8 from NFA_grammar.g4 — do not edit by hand."""

    # The deserialized ATN and its per-decision DFAs drive tokenisation.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; these must stay in sync with NFA_grammar.tokens.
    INIT = 1
    EQUALS = 2
    LEFT_PARENTHESIS = 3
    RIGHT_PARENTHESIS = 4
    LEFT_BRACKET = 5
    RIGHT_BRACKET = 6
    COMMA = 7
    FINAL = 8
    STATE = 9
    WS = 10

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Display names indexed by token type (index 0 is unused).
    literalNames = [ "<INVALID>",
            "'init'", "'='", "'('", "')'", "'{'", "'}'", "','" ]

    symbolicNames = [ "<INVALID>",
            "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_BRACKET",
            "RIGHT_BRACKET", "COMMA", "FINAL", "STATE", "WS" ]

    ruleNames = [ "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS",
                  "LEFT_BRACKET", "RIGHT_BRACKET", "COMMA", "FINAL", "STATE",
                  "WS" ]

    grammarFileName = "NFA_grammar.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Build a lexer over `input`, writing diagnostics to `output`."""
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
INIT=1
EQUALS=2
LEFT_PARENTHESIS=3
RIGHT_PARENTHESIS=4
LEFT_BRACKET=5
RIGHT_BRACKET=6
COMMA=7
FINAL=8
STATE=9
WS=10
'init'=1
'='=2
'('=3
')'=4
'{'=5
'}'=6
','=7
# Generated from NFA_grammar.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .NFA_grammarParser import NFA_grammarParser
else:
from NFA_grammarParser import NFA_grammarParser
# This class defines a complete listener for a parse tree produced by NFA_grammarParser.
class NFA_grammarListener(ParseTreeListener):
    """Empty listener generated by ANTLR 4.8 from NFA_grammar.g4.

    Subclass this and override only the enter/exit hooks you need; every
    method below is a deliberate no-op.
    """

    # Enter a parse tree produced by NFA_grammarParser#start.
    def enterStart(self, ctx:NFA_grammarParser.StartContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#start.
    def exitStart(self, ctx:NFA_grammarParser.StartContext):
        pass

    # Enter a parse tree produced by NFA_grammarParser#init.
    def enterInit(self, ctx:NFA_grammarParser.InitContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#init.
    def exitInit(self, ctx:NFA_grammarParser.InitContext):
        pass

    # Enter a parse tree produced by NFA_grammarParser#production.
    def enterProduction(self, ctx:NFA_grammarParser.ProductionContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#production.
    def exitProduction(self, ctx:NFA_grammarParser.ProductionContext):
        pass

    # Enter a parse tree produced by NFA_grammarParser#stateset.
    def enterStateset(self, ctx:NFA_grammarParser.StatesetContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#stateset.
    def exitStateset(self, ctx:NFA_grammarParser.StatesetContext):
        pass

    # Enter a parse tree produced by NFA_grammarParser#final.
    def enterFinal(self, ctx:NFA_grammarParser.FinalContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#final.
    def exitFinal(self, ctx:NFA_grammarParser.FinalContext):
        pass

    # Enter a parse tree produced by NFA_grammarParser#statename.
    def enterStatename(self, ctx:NFA_grammarParser.StatenameContext):
        pass

    # Exit a parse tree produced by NFA_grammarParser#statename.
    def exitStatename(self, ctx:NFA_grammarParser.StatenameContext):
        pass

# Drop the module-level alias so it does not leak as a public attribute.
del NFA_grammarParser
\ No newline at end of file
# Generated from NFA_grammar.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (augmented transition network) for the
    NFA_grammar parser.

    The payload below is machine-generated by ANTLR 4.8 from
    NFA_grammar.g4 — do not edit it by hand; regenerate instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\f")
        buf.write("8\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\3\2")
        buf.write("\3\2\7\2\21\n\2\f\2\16\2\24\13\2\3\2\3\2\3\3\3\3\3\3\3")
        buf.write("\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\7\5")
        buf.write("(\n\5\f\5\16\5+\13\5\3\5\5\5.\n\5\3\5\3\5\3\6\3\6\3\6")
        buf.write("\3\6\3\7\3\7\3\7\2\2\b\2\4\6\b\n\f\2\2\2\64\2\16\3\2\2")
        buf.write("\2\4\27\3\2\2\2\6\33\3\2\2\2\b#\3\2\2\2\n\61\3\2\2\2\f")
        buf.write("\65\3\2\2\2\16\22\5\4\3\2\17\21\5\6\4\2\20\17\3\2\2\2")
        buf.write("\21\24\3\2\2\2\22\20\3\2\2\2\22\23\3\2\2\2\23\25\3\2\2")
        buf.write("\2\24\22\3\2\2\2\25\26\5\n\6\2\26\3\3\2\2\2\27\30\7\3")
        buf.write("\2\2\30\31\7\4\2\2\31\32\5\f\7\2\32\5\3\2\2\2\33\34\7")
        buf.write("\5\2\2\34\35\5\f\7\2\35\36\7\t\2\2\36\37\5\f\7\2\37 \7")
        buf.write("\6\2\2 !\7\4\2\2!\"\5\b\5\2\"\7\3\2\2\2#-\7\7\2\2$)\5")
        buf.write("\f\7\2%&\7\t\2\2&(\5\f\7\2\'%\3\2\2\2(+\3\2\2\2)\'\3\2")
        buf.write("\2\2)*\3\2\2\2*.\3\2\2\2+)\3\2\2\2,.\3\2\2\2-$\3\2\2\2")
        buf.write("-,\3\2\2\2./\3\2\2\2/\60\7\b\2\2\60\t\3\2\2\2\61\62\7")
        buf.write("\n\2\2\62\63\7\4\2\2\63\64\5\b\5\2\64\13\3\2\2\2\65\66")
        buf.write("\7\13\2\2\66\r\3\2\2\2\5\22)-")
        return buf.getvalue()
class NFA_grammarParser ( Parser ):
    """Parser generated by ANTLR 4.8 from NFA_grammar.g4 — do not edit by hand."""

    grammarFileName = "NFA_grammar.g4"

    # The deserialized ATN and its per-decision DFAs drive prediction.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Display names indexed by token type (index 0 is unused).
    literalNames = [ "<INVALID>", "'init'", "'='", "'('", "')'", "'{'",
                     "'}'", "','" ]

    symbolicNames = [ "<INVALID>", "INIT", "EQUALS", "LEFT_PARENTHESIS",
                      "RIGHT_PARENTHESIS", "LEFT_BRACKET", "RIGHT_BRACKET",
                      "COMMA", "FINAL", "STATE", "WS" ]

    # Rule indices; they match the order of `ruleNames`.
    RULE_start = 0
    RULE_init = 1
    RULE_production = 2
    RULE_stateset = 3
    RULE_final = 4
    RULE_statename = 5

    ruleNames = [ "start", "init", "production", "stateset", "final", "statename" ]

    EOF = Token.EOF
    # Token type constants; these must stay in sync with NFA_grammar.tokens.
    INIT=1
    EQUALS=2
    LEFT_PARENTHESIS=3
    RIGHT_PARENTHESIS=4
    LEFT_BRACKET=5
    RIGHT_BRACKET=6
    COMMA=7
    FINAL=8
    STATE=9
    WS=10

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Build a parser over the token stream `input`, writing diagnostics to `output`."""
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
class StartContext(ParserRuleContext):
    """Parse-tree context for the `start` rule: init production* final."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def init(self):
        # The single mandatory `init` child.
        return self.getTypedRuleContext(NFA_grammarParser.InitContext,0)

    def final(self):
        # The single mandatory `final` child.
        return self.getTypedRuleContext(NFA_grammarParser.FinalContext,0)

    def production(self, i:int=None):
        # With i=None return every `production` child; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(NFA_grammarParser.ProductionContext)
        else:
            return self.getTypedRuleContext(NFA_grammarParser.ProductionContext,i)

    def getRuleIndex(self):
        return NFA_grammarParser.RULE_start

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterStart" ):
            listener.enterStart(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitStart" ):
            listener.exitStart(self)
def start(self):
    """Parse the `start` rule: init production* final.

    Returns the populated StartContext; recognition errors are reported to
    the error handler and recorded on the context rather than raised.
    """

    localctx = NFA_grammarParser.StartContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_start)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 12
        self.init()
        self.state = 16
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more productions; each one begins with '(' (LEFT_PARENTHESIS).
        while _la==NFA_grammarParser.LEFT_PARENTHESIS:
            self.state = 13
            self.production()
            self.state = 18
            self._errHandler.sync(self)
            _la = self._input.LA(1)

        self.state = 19
        self.final()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class InitContext(ParserRuleContext):
    """Parse-tree context for the `init` rule: INIT EQUALS statename."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INIT(self):
        return self.getToken(NFA_grammarParser.INIT, 0)

    def EQUALS(self):
        return self.getToken(NFA_grammarParser.EQUALS, 0)

    def statename(self):
        # The declared initial state.
        return self.getTypedRuleContext(NFA_grammarParser.StatenameContext,0)

    def getRuleIndex(self):
        return NFA_grammarParser.RULE_init

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterInit" ):
            listener.enterInit(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitInit" ):
            listener.exitInit(self)
def init(self):
    """Parse the `init` rule: INIT EQUALS statename.

    Returns the populated InitContext; recognition errors are reported to
    the error handler and recorded on the context rather than raised.
    """

    localctx = NFA_grammarParser.InitContext(self, self._ctx, self.state)
    self.enterRule(localctx, 2, self.RULE_init)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 21
        self.match(NFA_grammarParser.INIT)
        self.state = 22
        self.match(NFA_grammarParser.EQUALS)
        self.state = 23
        self.statename()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ProductionContext(ParserRuleContext):
    """Parse-tree context for the `production` rule:
    LEFT_PARENTHESIS statename COMMA statename RIGHT_PARENTHESIS EQUALS stateset.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LEFT_PARENTHESIS(self):
        return self.getToken(NFA_grammarParser.LEFT_PARENTHESIS, 0)

    def statename(self, i:int=None):
        # With i=None return both `statename` children (source state and
        # input symbol); otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(NFA_grammarParser.StatenameContext)
        else:
            return self.getTypedRuleContext(NFA_grammarParser.StatenameContext,i)

    def COMMA(self):
        return self.getToken(NFA_grammarParser.COMMA, 0)

    def RIGHT_PARENTHESIS(self):
        return self.getToken(NFA_grammarParser.RIGHT_PARENTHESIS, 0)

    def EQUALS(self):
        return self.getToken(NFA_grammarParser.EQUALS, 0)

    def stateset(self):
        # The set of target states on the right-hand side.
        return self.getTypedRuleContext(NFA_grammarParser.StatesetContext,0)

    def getRuleIndex(self):
        return NFA_grammarParser.RULE_production

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterProduction" ):
            listener.enterProduction(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitProduction" ):
            listener.exitProduction(self)
def production(self):
    """Parse the `production` rule: '(' statename ',' statename ')' '=' stateset.

    Returns the populated ProductionContext; recognition errors are reported
    to the error handler and recorded on the context rather than raised.
    """

    localctx = NFA_grammarParser.ProductionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 4, self.RULE_production)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 25
        self.match(NFA_grammarParser.LEFT_PARENTHESIS)
        self.state = 26
        self.statename()
        self.state = 27
        self.match(NFA_grammarParser.COMMA)
        self.state = 28
        self.statename()
        self.state = 29
        self.match(NFA_grammarParser.RIGHT_PARENTHESIS)
        self.state = 30
        self.match(NFA_grammarParser.EQUALS)
        self.state = 31
        self.stateset()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class StatesetContext(ParserRuleContext):
    """Parse-tree context for the `stateset` rule:
    LEFT_BRACKET (statename (COMMA statename)* | ) RIGHT_BRACKET.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LEFT_BRACKET(self):
        return self.getToken(NFA_grammarParser.LEFT_BRACKET, 0)

    def RIGHT_BRACKET(self):
        return self.getToken(NFA_grammarParser.RIGHT_BRACKET, 0)

    def statename(self, i:int=None):
        # With i=None return every `statename` child (possibly none, since
        # the set may be empty); otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(NFA_grammarParser.StatenameContext)
        else:
            return self.getTypedRuleContext(NFA_grammarParser.StatenameContext,i)

    def COMMA(self, i:int=None):
        # With i=None return every separator COMMA token; otherwise the i-th one.
        if i is None:
            return self.getTokens(NFA_grammarParser.COMMA)
        else:
            return self.getToken(NFA_grammarParser.COMMA, i)

    def getRuleIndex(self):
        return NFA_grammarParser.RULE_stateset

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterStateset" ):
            listener.enterStateset(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitStateset" ):
            listener.exitStateset(self)
def stateset(self):
localctx = NFA_grammarParser.StatesetContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_stateset)
self._la = 0 # Token type