Loading DFA_grammar.g4 0 → 100644 +30 −0 Original line number Diff line number Diff line grammar DFA_grammar; /* Parser Rules */ start: init production* final; init: INIT EQUALS statename; production: LEFT_PARENTHESIS statename COMMA statename RIGHT_PARENTHESIS EQUALS statename; final: FINAL EQUALS LEFT_BRACKET (statename (COMMA statename)* | ) RIGHT_BRACKET; statename: STATE; /* Lexer Rules */ /* Tokens */ INIT : 'init'; EQUALS : '='; LEFT_PARENTHESIS : '('; RIGHT_PARENTHESIS : ')'; LEFT_BRACKET : '{'; RIGHT_BRACKET : '}'; COMMA : ','; FINAL : 'final'; STATE : [a-zA-Z0-9]+; /* Characters to be ignored */ WS : [ \r\t\n]+ -> skip ; DFA_grammar.interp 0 → 100644 +36 −0 Original line number Diff line number Diff line token literal names: null 'init' '=' '(' ')' '{' '}' ',' 'final' null null token symbolic names: null INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS rule names: start init production final statename atn: [3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 12, 52, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 3, 2, 3, 2, 7, 2, 15, 10, 2, 12, 2, 14, 2, 18, 11, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 40, 10, 5, 12, 5, 14, 5, 43, 11, 5, 3, 5, 5, 5, 46, 10, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 2, 2, 7, 2, 4, 6, 8, 10, 2, 2, 2, 49, 2, 12, 3, 2, 2, 2, 4, 21, 3, 2, 2, 2, 6, 25, 3, 2, 2, 2, 8, 33, 3, 2, 2, 2, 10, 49, 3, 2, 2, 2, 12, 16, 5, 4, 3, 2, 13, 15, 5, 6, 4, 2, 14, 13, 3, 2, 2, 2, 15, 18, 3, 2, 2, 2, 16, 14, 3, 2, 2, 2, 16, 17, 3, 2, 2, 2, 17, 19, 3, 2, 2, 2, 18, 16, 3, 2, 2, 2, 19, 20, 5, 8, 5, 2, 20, 3, 3, 2, 2, 2, 21, 22, 7, 3, 2, 2, 22, 23, 7, 4, 2, 2, 23, 24, 5, 10, 6, 2, 24, 5, 3, 2, 2, 2, 25, 26, 7, 5, 2, 2, 26, 27, 5, 10, 6, 2, 27, 28, 7, 9, 2, 2, 28, 29, 5, 10, 6, 2, 29, 30, 7, 6, 2, 2, 30, 31, 7, 4, 2, 2, 31, 32, 5, 10, 6, 2, 32, 7, 3, 2, 2, 2, 33, 34, 7, 10, 2, 2, 34, 35, 7, 4, 2, 2, 35, 45, 
7, 7, 2, 2, 36, 41, 5, 10, 6, 2, 37, 38, 7, 9, 2, 2, 38, 40, 5, 10, 6, 2, 39, 37, 3, 2, 2, 2, 40, 43, 3, 2, 2, 2, 41, 39, 3, 2, 2, 2, 41, 42, 3, 2, 2, 2, 42, 46, 3, 2, 2, 2, 43, 41, 3, 2, 2, 2, 44, 46, 3, 2, 2, 2, 45, 36, 3, 2, 2, 2, 45, 44, 3, 2, 2, 2, 46, 47, 3, 2, 2, 2, 47, 48, 7, 8, 2, 2, 48, 9, 3, 2, 2, 2, 49, 50, 7, 11, 2, 2, 50, 11, 3, 2, 2, 2, 5, 16, 41, 45] No newline at end of file DFA_grammar.tokens 0 → 100644 +18 −0 Original line number Diff line number Diff line INIT=1 EQUALS=2 LEFT_PARENTHESIS=3 RIGHT_PARENTHESIS=4 LEFT_BRACKET=5 RIGHT_BRACKET=6 COMMA=7 FINAL=8 STATE=9 WS=10 'init'=1 '='=2 '('=3 ')'=4 '{'=5 '}'=6 ','=7 'final'=8 DFA_grammarLexer.interp 0 → 100644 +47 −0 Original line number Diff line number Diff line token literal names: null 'init' '=' '(' ')' '{' '}' ',' 'final' null null token symbolic names: null INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS rule names: INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS channel names: DEFAULT_TOKEN_CHANNEL HIDDEN mode names: DEFAULT_MODE atn: [3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 12, 58, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 6, 10, 48, 10, 10, 13, 10, 14, 10, 49, 3, 11, 6, 11, 53, 10, 11, 13, 11, 14, 11, 54, 3, 11, 3, 11, 2, 2, 12, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 3, 2, 4, 5, 2, 50, 59, 67, 92, 99, 124, 5, 2, 11, 12, 15, 15, 34, 34, 2, 59, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 3, 23, 3, 2, 2, 2, 5, 28, 3, 2, 2, 2, 7, 30, 3, 2, 2, 2, 9, 32, 3, 2, 2, 2, 11, 34, 3, 2, 2, 
2, 13, 36, 3, 2, 2, 2, 15, 38, 3, 2, 2, 2, 17, 40, 3, 2, 2, 2, 19, 47, 3, 2, 2, 2, 21, 52, 3, 2, 2, 2, 23, 24, 7, 107, 2, 2, 24, 25, 7, 112, 2, 2, 25, 26, 7, 107, 2, 2, 26, 27, 7, 118, 2, 2, 27, 4, 3, 2, 2, 2, 28, 29, 7, 63, 2, 2, 29, 6, 3, 2, 2, 2, 30, 31, 7, 42, 2, 2, 31, 8, 3, 2, 2, 2, 32, 33, 7, 43, 2, 2, 33, 10, 3, 2, 2, 2, 34, 35, 7, 125, 2, 2, 35, 12, 3, 2, 2, 2, 36, 37, 7, 127, 2, 2, 37, 14, 3, 2, 2, 2, 38, 39, 7, 46, 2, 2, 39, 16, 3, 2, 2, 2, 40, 41, 7, 104, 2, 2, 41, 42, 7, 107, 2, 2, 42, 43, 7, 112, 2, 2, 43, 44, 7, 99, 2, 2, 44, 45, 7, 110, 2, 2, 45, 18, 3, 2, 2, 2, 46, 48, 9, 2, 2, 2, 47, 46, 3, 2, 2, 2, 48, 49, 3, 2, 2, 2, 49, 47, 3, 2, 2, 2, 49, 50, 3, 2, 2, 2, 50, 20, 3, 2, 2, 2, 51, 53, 9, 3, 2, 2, 52, 51, 3, 2, 2, 2, 53, 54, 3, 2, 2, 2, 54, 52, 3, 2, 2, 2, 54, 55, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 57, 8, 11, 2, 2, 57, 22, 3, 2, 2, 2, 5, 2, 49, 54, 3, 8, 2, 2] No newline at end of file DFA_grammarLexer.py 0 → 100644 +76 −0 Original line number Diff line number Diff line # Generated from DFA_grammar.g4 by ANTLR 4.8 from antlr4 import * # type: ignore from io import StringIO from typing.io import TextIO import sys def serializedATN(): with StringIO() as buf: buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f") buf.write(":\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\2\3\2\3\2") buf.write("\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3") buf.write("\t\3\t\3\t\3\t\3\t\3\n\6\n\60\n\n\r\n\16\n\61\3\13\6\13") buf.write("\65\n\13\r\13\16\13\66\3\13\3\13\2\2\f\3\3\5\4\7\5\t\6") buf.write("\13\7\r\b\17\t\21\n\23\13\25\f\3\2\4\5\2\62;C\\c|\5\2") buf.write("\13\f\17\17\"\"\2;\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2") buf.write("\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21") buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\3\27\3\2\2\2\5\34\3") buf.write("\2\2\2\7\36\3\2\2\2\t \3\2\2\2\13\"\3\2\2\2\r$\3\2\2\2") 
buf.write("\17&\3\2\2\2\21(\3\2\2\2\23/\3\2\2\2\25\64\3\2\2\2\27") buf.write("\30\7k\2\2\30\31\7p\2\2\31\32\7k\2\2\32\33\7v\2\2\33\4") buf.write("\3\2\2\2\34\35\7?\2\2\35\6\3\2\2\2\36\37\7*\2\2\37\b\3") buf.write("\2\2\2 !\7+\2\2!\n\3\2\2\2\"#\7}\2\2#\f\3\2\2\2$%\7\177") buf.write("\2\2%\16\3\2\2\2&\'\7.\2\2\'\20\3\2\2\2()\7h\2\2)*\7k") buf.write("\2\2*+\7p\2\2+,\7c\2\2,-\7n\2\2-\22\3\2\2\2.\60\t\2\2") buf.write("\2/.\3\2\2\2\60\61\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62") buf.write("\24\3\2\2\2\63\65\t\3\2\2\64\63\3\2\2\2\65\66\3\2\2\2") buf.write("\66\64\3\2\2\2\66\67\3\2\2\2\678\3\2\2\289\b\13\2\29\26") buf.write("\3\2\2\2\5\2\61\66\3\b\2\2") return buf.getvalue() class DFA_grammarLexer(Lexer): atn = ATNDeserializer().deserialize(serializedATN()) decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] INIT = 1 EQUALS = 2 LEFT_PARENTHESIS = 3 RIGHT_PARENTHESIS = 4 LEFT_BRACKET = 5 RIGHT_BRACKET = 6 COMMA = 7 FINAL = 8 STATE = 9 WS = 10 channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] modeNames = [ "DEFAULT_MODE" ] literalNames = [ "<INVALID>", "'init'", "'='", "'('", "')'", "'{'", "'}'", "','", "'final'" ] symbolicNames = [ "<INVALID>", "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_BRACKET", "RIGHT_BRACKET", "COMMA", "FINAL", "STATE", "WS" ] ruleNames = [ "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_BRACKET", "RIGHT_BRACKET", "COMMA", "FINAL", "STATE", "WS" ] grammarFileName = "DFA_grammar.g4" def __init__(self, input=None, output:TextIO = sys.stdout): super().__init__(input, output) self.checkVersion("4.8") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None Loading
grammar DFA_grammar;

/*
 * Parser rules.
 *
 * A DFA description consists of:
 *   1. one initial-state declaration,
 *   2. zero or more transition productions,
 *   3. one final-state set.
 */

// Entry point: init line, any number of transitions, then the final set.
start: init production* final;

// e.g.  init = q0
init: INIT EQUALS statename;

// e.g.  (q0, a) = q1   -- transition (source state, input symbol) -> target state
production: LEFT_PARENTHESIS statename COMMA statename RIGHT_PARENTHESIS
            EQUALS statename;

// e.g.  final = {q1, q2}   -- the set may be empty:  final = {}
// (The comma-separated list is written with the idiomatic optional
//  subrule `( ... )?` instead of an empty alternative.)
final: FINAL EQUALS LEFT_BRACKET (statename (COMMA statename)*)? RIGHT_BRACKET;

statename: STATE;

/*
 * Lexer rules.
 *
 * NOTE: the keyword tokens INIT and FINAL must be declared before STATE;
 * ANTLR gives earlier rules priority, so 'init'/'final' lex as keywords
 * rather than as STATE.
 */

INIT              : 'init';
EQUALS            : '=';
LEFT_PARENTHESIS  : '(';
RIGHT_PARENTHESIS : ')';
LEFT_BRACKET      : '{';
RIGHT_BRACKET     : '}';
COMMA             : ',';
FINAL             : 'final';
STATE             : [a-zA-Z0-9]+;

/* Whitespace carries no meaning and is skipped. */
WS : [ \r\t\n]+ -> skip;
DFA_grammar.interp 0 → 100644 +36 −0 Original line number Diff line number Diff line token literal names: null 'init' '=' '(' ')' '{' '}' ',' 'final' null null token symbolic names: null INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS rule names: start init production final statename atn: [3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 12, 52, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 3, 2, 3, 2, 7, 2, 15, 10, 2, 12, 2, 14, 2, 18, 11, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 40, 10, 5, 12, 5, 14, 5, 43, 11, 5, 3, 5, 5, 5, 46, 10, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 2, 2, 7, 2, 4, 6, 8, 10, 2, 2, 2, 49, 2, 12, 3, 2, 2, 2, 4, 21, 3, 2, 2, 2, 6, 25, 3, 2, 2, 2, 8, 33, 3, 2, 2, 2, 10, 49, 3, 2, 2, 2, 12, 16, 5, 4, 3, 2, 13, 15, 5, 6, 4, 2, 14, 13, 3, 2, 2, 2, 15, 18, 3, 2, 2, 2, 16, 14, 3, 2, 2, 2, 16, 17, 3, 2, 2, 2, 17, 19, 3, 2, 2, 2, 18, 16, 3, 2, 2, 2, 19, 20, 5, 8, 5, 2, 20, 3, 3, 2, 2, 2, 21, 22, 7, 3, 2, 2, 22, 23, 7, 4, 2, 2, 23, 24, 5, 10, 6, 2, 24, 5, 3, 2, 2, 2, 25, 26, 7, 5, 2, 2, 26, 27, 5, 10, 6, 2, 27, 28, 7, 9, 2, 2, 28, 29, 5, 10, 6, 2, 29, 30, 7, 6, 2, 2, 30, 31, 7, 4, 2, 2, 31, 32, 5, 10, 6, 2, 32, 7, 3, 2, 2, 2, 33, 34, 7, 10, 2, 2, 34, 35, 7, 4, 2, 2, 35, 45, 7, 7, 2, 2, 36, 41, 5, 10, 6, 2, 37, 38, 7, 9, 2, 2, 38, 40, 5, 10, 6, 2, 39, 37, 3, 2, 2, 2, 40, 43, 3, 2, 2, 2, 41, 39, 3, 2, 2, 2, 41, 42, 3, 2, 2, 2, 42, 46, 3, 2, 2, 2, 43, 41, 3, 2, 2, 2, 44, 46, 3, 2, 2, 2, 45, 36, 3, 2, 2, 2, 45, 44, 3, 2, 2, 2, 46, 47, 3, 2, 2, 2, 47, 48, 7, 8, 2, 2, 48, 9, 3, 2, 2, 2, 49, 50, 7, 11, 2, 2, 50, 11, 3, 2, 2, 2, 5, 16, 41, 45] No newline at end of file
DFA_grammar.tokens 0 → 100644 +18 −0 Original line number Diff line number Diff line INIT=1 EQUALS=2 LEFT_PARENTHESIS=3 RIGHT_PARENTHESIS=4 LEFT_BRACKET=5 RIGHT_BRACKET=6 COMMA=7 FINAL=8 STATE=9 WS=10 'init'=1 '='=2 '('=3 ')'=4 '{'=5 '}'=6 ','=7 'final'=8
DFA_grammarLexer.interp 0 → 100644 +47 −0 Original line number Diff line number Diff line token literal names: null 'init' '=' '(' ')' '{' '}' ',' 'final' null null token symbolic names: null INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS rule names: INIT EQUALS LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_BRACKET RIGHT_BRACKET COMMA FINAL STATE WS channel names: DEFAULT_TOKEN_CHANNEL HIDDEN mode names: DEFAULT_MODE atn: [3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 12, 58, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 10, 6, 10, 48, 10, 10, 13, 10, 14, 10, 49, 3, 11, 6, 11, 53, 10, 11, 13, 11, 14, 11, 54, 3, 11, 3, 11, 2, 2, 12, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 3, 2, 4, 5, 2, 50, 59, 67, 92, 99, 124, 5, 2, 11, 12, 15, 15, 34, 34, 2, 59, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 3, 23, 3, 2, 2, 2, 5, 28, 3, 2, 2, 2, 7, 30, 3, 2, 2, 2, 9, 32, 3, 2, 2, 2, 11, 34, 3, 2, 2, 2, 13, 36, 3, 2, 2, 2, 15, 38, 3, 2, 2, 2, 17, 40, 3, 2, 2, 2, 19, 47, 3, 2, 2, 2, 21, 52, 3, 2, 2, 2, 23, 24, 7, 107, 2, 2, 24, 25, 7, 112, 2, 2, 25, 26, 7, 107, 2, 2, 26, 27, 7, 118, 2, 2, 27, 4, 3, 2, 2, 2, 28, 29, 7, 63, 2, 2, 29, 6, 3, 2, 2, 2, 30, 31, 7, 42, 2, 2, 31, 8, 3, 2, 2, 2, 32, 33, 7, 43, 2, 2, 33, 10, 3, 2, 2, 2, 34, 35, 7, 125, 2, 2, 35, 12, 3, 2, 2, 2, 36, 37, 7, 127, 2, 2, 37, 14, 3, 2, 2, 2, 38, 39, 7, 46, 2, 2, 39, 16, 3, 2, 2, 2, 40, 41, 7, 104, 2, 2, 41, 42, 7, 107, 2, 2, 42, 43, 7, 112, 2, 2, 43, 44, 7, 99, 2, 2, 44, 45, 7, 110, 2, 2, 45, 18, 3, 2, 2, 2, 46, 48, 9, 2, 2, 2, 47, 46, 3, 2, 2, 2, 48, 49, 3, 2, 2, 2, 49, 
47, 3, 2, 2, 2, 49, 50, 3, 2, 2, 2, 50, 20, 3, 2, 2, 2, 51, 53, 9, 3, 2, 2, 52, 51, 3, 2, 2, 2, 53, 54, 3, 2, 2, 2, 54, 52, 3, 2, 2, 2, 54, 55, 3, 2, 2, 2, 55, 56, 3, 2, 2, 2, 56, 57, 8, 11, 2, 2, 57, 22, 3, 2, 2, 2, 5, 2, 49, 54, 3, 8, 2, 2] No newline at end of file
# Generated from DFA_grammar.g4 by ANTLR 4.8
#
# FIX(review): the generated header imported TextIO from ``typing.io``,
# a submodule that was deprecated in Python 3.8 and removed in Python
# 3.13, which makes this module fail to import on current interpreters.
# ``typing.TextIO`` is the equivalent, version-stable import.
from antlr4 import *  # type: ignore
from io import StringIO
from typing import TextIO
import sys


def serializedATN():
    """Return the serialized lexer ATN as a string.

    The payload is machine-generated by ANTLR and consumed by
    ``ATNDeserializer``; do not edit the escape sequences by hand.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f")
        buf.write(":\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\2\3\2\3\2")
        buf.write("\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3")
        buf.write("\t\3\t\3\t\3\t\3\t\3\n\6\n\60\n\n\r\n\16\n\61\3\13\6\13")
        buf.write("\65\n\13\r\13\16\13\66\3\13\3\13\2\2\f\3\3\5\4\7\5\t\6")
        buf.write("\13\7\r\b\17\t\21\n\23\13\25\f\3\2\4\5\2\62;C\\c|\5\2")
        buf.write("\13\f\17\17\"\"\2;\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2")
        buf.write("\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
        buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\3\27\3\2\2\2\5\34\3")
        buf.write("\2\2\2\7\36\3\2\2\2\t \3\2\2\2\13\"\3\2\2\2\r$\3\2\2\2")
        buf.write("\17&\3\2\2\2\21(\3\2\2\2\23/\3\2\2\2\25\64\3\2\2\2\27")
        buf.write("\30\7k\2\2\30\31\7p\2\2\31\32\7k\2\2\32\33\7v\2\2\33\4")
        buf.write("\3\2\2\2\34\35\7?\2\2\35\6\3\2\2\2\36\37\7*\2\2\37\b\3")
        buf.write("\2\2\2 !\7+\2\2!\n\3\2\2\2\"#\7}\2\2#\f\3\2\2\2$%\7\177")
        buf.write("\2\2%\16\3\2\2\2&\'\7.\2\2\'\20\3\2\2\2()\7h\2\2)*\7k")
        buf.write("\2\2*+\7p\2\2+,\7c\2\2,-\7n\2\2-\22\3\2\2\2.\60\t\2\2")
        buf.write("\2/.\3\2\2\2\60\61\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62")
        buf.write("\24\3\2\2\2\63\65\t\3\2\2\64\63\3\2\2\2\65\66\3\2\2\2")
        buf.write("\66\64\3\2\2\2\66\67\3\2\2\2\678\3\2\2\289\b\13\2\29\26")
        buf.write("\3\2\2\2\5\2\61\66\3\b\2\2")
        return buf.getvalue()


class DFA_grammarLexer(Lexer):
    """Lexer for the DFA description language.

    Tokens: the keywords ``init``/``final``, the punctuation
    ``= ( ) { } ,``, and alphanumeric state names (STATE).
    Whitespace is skipped (sent to no channel).
    """

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (must match DFA_grammar.tokens).
    INIT = 1
    EQUALS = 2
    LEFT_PARENTHESIS = 3
    RIGHT_PARENTHESIS = 4
    LEFT_BRACKET = 5
    RIGHT_BRACKET = 6
    COMMA = 7
    FINAL = 8
    STATE = 9
    WS = 10

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'init'", "'='", "'('", "')'", "'{'", "'}'", "','", "'final'" ]

    symbolicNames = [ "<INVALID>",
            "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS",
            "LEFT_BRACKET", "RIGHT_BRACKET", "COMMA", "FINAL", "STATE",
            "WS" ]

    ruleNames = [ "INIT", "EQUALS", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS",
                  "LEFT_BRACKET", "RIGHT_BRACKET", "COMMA", "FINAL", "STATE",
                  "WS" ]

    grammarFileName = "DFA_grammar.g4"

    def __init__(self, input=None, output: TextIO = sys.stdout):
        """Create a lexer over *input* (a CharStream), echoing errors to *output*."""
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA,
                                         PredictionContextCache())
        self._actions = None
        self._predicates = None