Commit cb1746ce authored by Kateřina Sloupová's avatar Kateřina Sloupová
Browse files

typing tweaks

parent 0e80271b
Pipeline #62480 passed with stage
in 59 seconds
......@@ -58,7 +58,7 @@ def compare():
student_string=student_string, student_type=tasks[student_type],
teacher_string=teacher_string, teacher_type=types[teacher_type],
extra_word_ce=extra_word_ce, missing_word_ce=missing_word_ce)
flash(error)
#flash(error)
student_example = ""
teacher_example = ""
......@@ -88,7 +88,7 @@ def convert():
return render_template('result.html', compare=False, student_string=student_string,
student_type=types[student_type], task=tasks[task], output=output)
flash(error)
#flash(error)
example = ""
# if request.method == 'POST' and 'example_button' in request.form:
......
......@@ -20,7 +20,10 @@ class WebChecker():
except ParsingError as ex:
return("Chyba při parsování.")
task_solved = check_task(student_solution, task)
task_solved = ""
if isinstance(student_solution, reg.DFA) or isinstance(student_solution, reg.NFA):
task_solved = check_task(student_solution, task)
check_empty(student_solution=student_dfa,
teacher_solution=teacher_dfa)
......@@ -51,27 +54,27 @@ class WebChecker():
parser = Parser()
if self.task in {"DFA", "TOT", "MIN"}:
automaton = dfa_transform(self.student_string, student_type)
dfa = dfa_transform(self.student_string, student_type)
if self.task == "DFA":
return parser.dfa_to_str(automaton)
return parser.dfa_to_str(dfa)
if self.task == "TOT":
return parser.dfa_to_str(automaton.total())
return parser.dfa_to_str(dfa.total())
if self.task == "MIN":
return parser.dfa_to_str(automaton.minimize())
return parser.dfa_to_str(dfa.minimize())
#if canonize: TODO checkbox
# return parser.dfa_to_str(automaton.canonize())
automaton = nfa_transform(self.student_string, student_type)
nfa = nfa_transform(self.student_string, student_type)
if self.task == "EFA":
return parser.nfa_to_str(automaton)
return parser.nfa_to_str(nfa)
automaton = automaton.eliminate_epsilon()
nfa = nfa.eliminate_epsilon()
if self.task == "NFA":
return parser.nfa_to_str(automaton)
return parser.nfa_to_str(nfa)
if self.task == "GRA":
return parser.reggrammar_to_str(automaton.nfa_to_reggrammar().eliminate_useless())
return parser.reggrammar_to_str(nfa.nfa_to_reggrammar().eliminate_useless())
if self.task == "REG":
return self.student_string if student_type == "REG" else \
......
......@@ -44,7 +44,7 @@ def nfa_transform(string: str, automaton_type: str) -> reg.NFA:
except ParsingError as ex:
raise ParsingError(ex.args)
def transform(string: str, automaton_type: str):
def transform(string: str, automaton_type: str) -> Union[reg.DFA, reg.NFA, reg.RegGrammar, reg.RegEx]:
try:
parser = Parser()
......@@ -63,14 +63,14 @@ def transform(string: str, automaton_type: str):
raise ParsingError(ex.args)
def check_task(automaton: Union[reg.DFA, reg.NFA], task: str) -> str:
parser = Parser()
output = ""
if task == "NFA" and isinstance(automaton, reg.NFA) and automaton.has_epsilon():
return "NFA obsahuje ε-kroky."
if isinstance(automaton, reg.NFA):
if task == "NFA" and automaton.has_epsilon():
output = "NFA obsahuje ε-kroky."
return output
if task not in {"TOT", "MIN", "TOC", "MIC"}:
return output
......
......@@ -349,7 +349,7 @@ class DFA:
if (state, character) in self.transition and \
self.transition[state, character] == dest_state:
return character
# else should not happen really
return Character("")
@staticmethod
def one_of(collection: Set[type_var]) -> type_var:
......@@ -397,8 +397,8 @@ class DFA:
return IsEmptyResult()
terminable = self.terminating_states(completed, pred)
for state in succ:
succ[state] = succ[state].intersection(terminable)
for sstate in succ:
succ[sstate] = succ[sstate].intersection(terminable)
if self.init in self.final:
return IsEmptyResult('ε', len(on_cycle.intersection(terminable)) > 0)
......
......@@ -25,18 +25,17 @@ class RegGrammar:
@staticmethod
def from_cfg(cfg: CFG) -> RegGrammar:
if cfg.is_regular():
reg_rules : RegGrammar.Rules = {}
for nonterminal in cfg.rules:
for rule in cfg.rules[nonterminal]:
new_rule = rule
# rule of type Tuple[Terminal] becomes of type Terminal, others are OK
if isinstance(rule, tuple) and len(rule) == 1:
new_rule = rule[0]
reg_rules.setdefault(nonterminal, set()).add(new_rule)
return RegGrammar(cfg.nonterminals, cfg.terminals, reg_rules, cfg.init)
assert cfg.is_regular()
reg_rules : RegGrammar.Rules = {}
for nonterminal in cfg.rules:
for rule in cfg.rules[nonterminal]:
new_rule: Union[Terminal, Tuple[Terminal, Nonterminal]] = rule
# rule of type Tuple[Terminal] becomes of type Terminal, others are OK
if isinstance(rule, tuple) and isinstance(rule[0], Terminal) and len(rule) == 1:
new_rule = rule[0]
reg_rules.setdefault(nonterminal, set()).add(new_rule)
return RegGrammar(cfg.nonterminals, cfg.terminals, reg_rules, cfg.init)
# exception
......@@ -71,7 +70,7 @@ class RegGrammar:
for terminal in self.terminals:
characters.add(Character(terminal.name))
transition: Dict[Tuple[State, Character], Set[State]] = dict()
transition: NFA.Transition = dict()
for nonterminal in self.rules:
state = State(nonterminal.name)
for rule in self.rules[nonterminal]:
......@@ -109,7 +108,6 @@ class RegGrammar:
if isinstance(rule, Terminal) or isinstance(rule, Eps):
actual.add(nonterminal)
elif rule[1] in previous:
print("tuple", nonterminal.name, rule[1].name)
actual.add(nonterminal)
actual.remove(helper)
......
......@@ -79,7 +79,7 @@ class NFA:
for state in self.states:
surroundings[state] = self.epsilon_surroundings(state)
new_transition: Dict[Tuple[State, Character], Set[State]] = {}
new_transition: NFA.Transition = {}
for state in self.states:
for character in self.characters:
reached_states = set()
......
......@@ -25,7 +25,7 @@ class ParsingError(Exception):
# This is needed because antlr is too smart and parse at least something possible
# even when input formalism and given type don't match. This way it aborts on any parsing problem.
class ErrorListener(ErrorListener):
class ErrorShouter(ErrorListener):
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("ERROR: when parsing line %d column %d: %s\n" % \
(line, column, msg))
......@@ -58,8 +58,7 @@ class Parser:
terminals = self.names_to_str(gra.terminals)
return f"Grammar: ({nonterminals}, {terminals}, P, {gra.init.name})\n{self.rules_to_str(gra.rules)}"
def rules_to_str(self, rules: RegGrammar.Rules) -> str:
def rules_to_str(self, rules: Union[CFG.Rules, RegGrammar.Rules]) -> str:
out = ""
for nonterminal in rules:
if len(rules[nonterminal]) == 0:
......@@ -68,7 +67,7 @@ class Parser:
out += f"{nonterminal.name} -> {rewritten}\n"
return out[:-1]
def rewrite_variant(self, variant: Union[Terminal, Tuple[Union[Terminal, Nonterminal], ...]]) -> str:
def rewrite_variant(self, variant: Union[Eps, Terminal, Tuple[Union[Terminal, Nonterminal], ...]]) -> str:
if isinstance(variant, Tuple):
return ''.join(map(lambda x: x.name, variant))
return variant.name
......@@ -107,7 +106,7 @@ class Parser:
return reg.expression.astprint()
def common_parse(self, string: str, given_lexer, given_parser, given_builder):
error_listener = ErrorListener()
error_listener = ErrorShouter()
chars = antlr4.InputStream(string)
lexer = given_lexer(chars)
lexer.addErrorListener(error_listener)
......@@ -168,7 +167,7 @@ class Parser:
def str_to_regex(self, string: str) -> RegEx:
try:
error_listener = ErrorListener()
error_listener = ErrorShouter()
chars = antlr4.InputStream(string)
lexer = RegExLexer(chars)
lexer.addErrorListener(error_listener)
......
......@@ -30,39 +30,40 @@ class NFA(NFA):
return NFA(nfa.states, nfa.characters, nfa.transition, nfa.init, nfa.final)
def nfa_to_reggrammar(self) -> RegGrammar:
nfa: NFA = self.eliminate_epsilon()
nonterminals: Set[Nonterminal] = set()
terminals: Set[Terminal] = set()
rules: RegGrammar.Rules = dict()
init = Nonterminal(self.init.name)
init = Nonterminal(nfa.init.name)
for state in self.states:
for state in nfa.states:
nonterminals.add(Nonterminal(state.name))
for character in self.characters:
for character in nfa.characters:
terminals.add(Terminal(character.name))
for state, character in self.transition:
for dest_state in self.transition[state, character]:
for state, character in nfa.transition:
for dest_state in nfa.transition[state, character]:
nonterminal1 = Nonterminal(state.name)
nonterminal2 = Nonterminal(dest_state.name)
terminal = Terminal(character.name)
rules.setdefault(nonterminal1, set()).add((terminal, nonterminal2))
if dest_state in self.final:
if dest_state in nfa.final:
rules[nonterminal1].add(terminal)
if self.init in self.final:
if nfa.init in nfa.final:
new_init = Nonterminal("<newinit>")
rules[new_init] = {Eps()}
for rule in rules[init]:
rules[new_init].add(rule)
rules[new_init].add(rules[init])
init = new_init
nonterminals.add(init)
return RegGrammar(nonterminals, terminals, rules, init)
def nfa_to_regex(self) -> RegEx:
# I don't want to have to do this really don't
pass
#def nfa_to_regex(self) -> RegEx:
# # I don't want to have to do this really don't
# pass
class RegEx(RegEx):
def regex_to_efa(self) -> NFA:
......
......@@ -81,6 +81,8 @@ class CharNode(AST):
return self.value
class RegEx:
Transition = Dict[Tuple[State, AST], Set[State]]
def __init__(self, characters: Set[Character], expression: AST):
self.characters = characters
self.expression = expression
......@@ -89,31 +91,31 @@ class RegEx:
init = State("init_state")
final = State("final_state")
states: Set[State] = {init, final}
transition: Dict[Tuple[State, AST], Set[State]] = dict()
transition: RegEx.Transition = dict()
transition[init, self.expression] = {final}
new_name = 0
while self.has_ast(transition):
changed = False
for (state, ast) in transition:
for dest_state in transition[state, ast]:
if ast.token == Bin.Union:
self.transition_add(transition, state, ast.left, dest_state)
self.transition_add(transition, state, ast.right, dest_state)
changed = True
elif ast.token == Bin.Concat:
if ast.left.token != Emptyset() and ast.right.token != Emptyset():
new_state = State(str(new_name))
states.add(new_state)
new_name += 1
self.transition_add(transition, state, ast.left, new_state)
self.transition_add(transition, new_state, ast.right, dest_state)
changed = True
elif type(ast.token) == Iter:
if type(ast) == BinOp:
if ast.token == Bin.Union:
self.transition_add(transition, state, ast.left, dest_state)
self.transition_add(transition, state, ast.right, dest_state)
changed = True
elif ast.token == Bin.Concat:
if ast.left.token != Emptyset() and ast.right.token != Emptyset():
new_state = State(str(new_name))
states.add(new_state)
new_name += 1
self.transition_add(transition, state, ast.left, new_state)
self.transition_add(transition, new_state, ast.right, dest_state)
changed = True
elif type(ast) == IterOp:
if ast.node == Emptyset():
if ast.token == Iter.Iteration:
self.transition_add(transition, state, CharNode(Eps()), dest_state)
......@@ -131,7 +133,7 @@ class RegEx:
del transition[state, ast]
break
new_transition: Dict[Tuple[State, Character], State] = dict()
new_transition: NFA.Transition = dict()
for (state, char) in transition:
if type(char.token) != Emptyset:
new_transition[state, char.token] = transition[state, char]
......@@ -144,7 +146,7 @@ class RegEx:
transition.setdefault((state, ast), set()).add(dest_state)
def has_ast(self, transition: Dict[Tuple[State, AST], State]):
def has_ast(self, transition: RegEx.Transition):
for (state, ast) in transition:
if type(ast) != CharNode:
return True
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment