How to use the stack_top method in fMBT

Best Python code snippets using fMBT_python
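In the snippets below, stack_top is an ordinary Python variable, attribute, or chunk slot that tracks the top of a stack inside each project, rather than a call into the fMBT library itself. A generic illustration of the same idiom (plain Python, names are illustrative only):

# Generic illustration only, not an fMBT API call:
stack = ['S', 'NP']      # a list used as a parse stack
stack_top = stack[-1]    # peek at the top element without popping it
print(stack_top)         # -> 'NP'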

parser.py

Source: parser.py (GitHub)


from assets import *          # provides the EOF, EPSILON and SYNCH sentinels
from anytree import Node, RenderTree
from codegen import Codegen
import re


class Tree:
    def __init__(self, root):
        self.root_node = Node(root)
        self.last_node = self.root_node
        self.indexes_stack = []
        self.fathers = []

    def add_node(self, stack_len, node_name, father=None, token_type=None):
        if not father:
            self.pop(stack_len)
            father = self.last_node
        if token_type:
            node = Node(f"({token_type}, {node_name})", parent=father)
        else:
            node = Node(node_name, parent=father)
        return node

    def pop(self, stack_len):
        if not self.indexes_stack or stack_len != self.indexes_stack[-1]:
            return
        self.last_node = self.fathers.pop()
        self.indexes_stack.pop()

    def push(self, len_terms, stack_len, grandpa):
        if len_terms <= 0:
            return
        self.indexes_stack.extend(range(stack_len, stack_len + len_terms))
        self.fathers.extend(len_terms * [grandpa])

    def __str__(self):
        tree_string = ''
        for pre, _, node in RenderTree(self.root_node.children[0]):
            tree_string += f"{pre}{node.name}\n"
        return tree_string


class Parser:
    def __init__(self, scanner, **paths):
        self.scanner = scanner
        self.syntax_errors = []
        first_dict = self.get_first_dict(path=paths.get('firsts', 'Firsts.csv'))
        follow_dict = self.get_follow_dict(path=paths.get('follows', 'Follows.csv'))
        grammar_tuples = self.get_grammar_tuple(path=paths.get('grammar', 'Grammar.csv'))
        predict_list = self.get_predict_list(path=paths.get('predicts', 'Predicts.csv'))
        start_symbol = grammar_tuples[0][0]
        self.parse_table = self.get_parse_table(grammar_tuples, first_dict, follow_dict, predict_list)
        self.non_terminals = first_dict.keys()
        self._advance_input = True
        self.stack = [start_symbol]
        self.tree = Tree(start_symbol)
        self.codegen = Codegen()

    def parse(self):
        void_type = False
        args = []
        while True:
            if self._advance_input:
                lookahead, lexeme, token_type, line_no = self._get_valid_token()
                # print(f'lookahead={lookahead}, lexeme={lexeme}, token_type={token_type}, line_no={line_no}')
                if lookahead is None and lexeme is None:
                    return
            self.codegen.save_program_block()
            stack_top = self.stack[-1]  # symbol currently on top of the parse stack
            # print(self.stack, lexeme)
            if stack_top in self.non_terminals:
                self._fetch_rules(stack_top, lookahead, line_no)
            elif stack_top == lookahead:
                self.stack.pop()
                if stack_top == EOF:
                    self.tree.add_node(len(self.stack), EOF)
                    self.codegen.save_program_block()
                    return
                self.tree.add_node(len(self.stack), lexeme, token_type=token_type)
                self._advance_input = True
            elif lookahead == EOF:
                pass
            elif re.match(r'^#\w+$', stack_top):
                # action symbols such as '#pid' trigger code generation
                # print(stack_top, args)
                self.stack.pop()
                self.tree.add_node(len(self.stack), stack_top)
                if stack_top == '#jp_break':
                    self.codegen.generate(stack_top, line_no)
                elif stack_top == '#type':
                    void_type = lexeme == 'void'
                    args.append(void_type)
                elif stack_top == '#pid':
                    args.append(lexeme)
                    args.append(line_no)
                    self.codegen.generate(stack_top, args)
                    if len(args) == 2:
                        args = []
                elif stack_top in ['#var', '#save_arr']:
                    self.codegen.generate(stack_top, args)
                    args = []
                elif stack_top == '#pnum_arr':
                    args.append(lexeme)
                    self.codegen.generate('#pnum', lexeme)
                elif stack_top in ['#arg', '#function_call']:
                    self.codegen.generate(stack_top, line_no)
                elif stack_top in ['#remove_fun_args']:
                    args = []
                elif stack_top == '#fun_declarated':
                    self.codegen.generate(stack_top)
                    args = []
                elif stack_top in ['#param_arr', '#param_var']:
                    self.codegen.generate(stack_top)
                    args = []
                elif stack_top[1:] in self.codegen.arg_actions:
                    self.codegen.generate(stack_top, lexeme)
                else:
                    self.codegen.generate(stack_top)
                self._advance_input = False
            else:
                # terminal on the stack does not match the lookahead: report it as missing
                self.stack.pop()
                self._advance_input = False
                self._add_error(line_no, 'missing', stack_top)
                self.tree.add_node(len(self.stack), lexeme, token_type=token_type)

    def _fetch_rules(self, stack_top, lookahead, line_no):
        rules = self.next_term(stack_top, lookahead)
        if rules == 'synch':
            self.stack.pop()
            self._add_error(line_no, 'missing', stack_top)
            self._advance_input = False
        elif rules == '':
            if lookahead == EOF:
                self._add_error(line_no, 'unexpected', 'EOF')
            else:
                self._add_error(line_no, 'illegal', lookahead)
            self._advance_input = True
        elif rules == [EPSILON]:
            self.stack.pop()
            father = self.tree.add_node(len(self.stack), stack_top)
            self.tree.add_node(len(self.stack), 'epsilon', father=father)
            self._advance_input = False
        else:
            self.stack.pop()
            self._push_rules(rules, stack_top)
            self._advance_input = False

    def _push_rules(self, rules, stack_top):
        stack_len = len(self.stack)
        new_node = self.tree.add_node(stack_len, stack_top)
        self.stack.extend(reversed(rules))
        self.tree.push(len(rules), stack_len, new_node)

    def _add_error(self, line_no, error_type, argument):
        msg = f'#{str(line_no + 1)} : syntax_error, {error_type} {argument}'
        self.syntax_errors.append(msg)

    def is_terminal(self, term):
        return term not in self.non_terminals and term != EOF

    def _get_valid_token(self):
        invalid_token = True  # whitespace and comment are not valid tokens
        while invalid_token:
            lexeme, token_type, line_no = self.scanner.get_next_token()
            if token_type in ['KEYWORD', 'SYMBOL'] or not token_type:
                lookahead = lexeme
                invalid_token = False
            elif token_type in ['WHITESPACE', 'COMMENT']:
                pass
            else:
                invalid_token = False
                lookahead = token_type
        return lookahead, lexeme, token_type, line_no

    def write_parse_tree_to_file(self):
        with open('parse_tree.txt', 'w') as parse_tree_file:
            parse_tree_file.writelines(str(self.tree))

    def get_first_dict(self, path):
        res = {}
        with open(path, 'r') as first_file:
            for line in first_file.readlines():
                words = line.strip().split(' ')
                res[words[0]] = words[1:]
        return res

    def get_follow_dict(self, path):
        res = {}
        with open(path, 'r') as follow_file:
            for line in follow_file.readlines():
                words = line.strip().split(' ')
                res[words[0]] = words[1:]
        return res

    def get_predict_list(self, path):
        res = []
        with open(path, 'r') as predict_file:
            for line in predict_file.readlines():
                words = line.strip().split(' ')
                res.append(words)
        return res

    def get_grammar_tuple(self, path):
        res = []
        with open(path, 'r') as grammar_file:
            for line in grammar_file.readlines():
                words = line.strip().split(' ')
                res.append((words[0], words[1:]))
        return res

    def get_parse_table(self, grammar, first, follow, predict):
        parse_table = {nt: {} for nt in first.keys()}
        for (non_terminal, grammars), predicts in zip(grammar, predict):
            for terminal in predicts:
                parse_table[non_terminal][terminal] = grammars
        for non_terminal, follows in follow.items():
            for terminal in follows:
                if terminal in parse_table[non_terminal]:
                    continue
                parse_table[non_terminal][terminal] = SYNCH
        return parse_table

    def next_term(self, non_terminal, terminal):
        try:
            return self.parse_table[non_terminal][terminal]
        except KeyError:
            return ''

    # TODO: remove! (JUST FOR TESTS)
    # def print_and_save_parse_table(self, path='parse_table.txt'):
    #     import pandas as pd
    #     df = pd.DataFrame(self.parse_table).T
    #     df.fillna('', inplace=True)
    #     df.to_csv(path)
    #     print(df)

    def write_syntax_errors_to_file(self):
        res = []
        if len(self.syntax_errors) == 0:
            with open('syntax_errors.txt', 'w') as file:
                file.writelines('There is no syntax error.')
            return
        for i in self.syntax_errors:
            res.append(i + '\n')
        with open('syntax_errors.txt', 'w') as file:
            ...
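For orientation, here is a minimal sketch of how this Parser could be driven. The Scanner and Codegen modules are project-specific and not shown in the snippet; the scanner import, its constructor argument, and the CSV file names below are assumptions based on the constructor and _get_valid_token() above.

# Hypothetical driver for the Parser above; `scanner` must provide
# get_next_token() -> (lexeme, token_type, line_no) as used in _get_valid_token().
from scanner import Scanner   # assumed project module, not shown in the snippet

scanner = Scanner('input.txt')            # assumed constructor signature
parser = Parser(scanner,
                firsts='Firsts.csv', follows='Follows.csv',
                grammar='Grammar.csv', predicts='Predicts.csv')
parser.parse()                            # stack_top drives the LL(1) loop above
parser.write_parse_tree_to_file()         # writes parse_tree.txt
parser.write_syntax_errors_to_file()      # writes syntax_errors.txt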


ch3_topdown_parser.py

Source: ch3_topdown_parser.py (GitHub)


1"""2A simple top-down parser.3"""4import pyactr as actr5actr.chunktype("parsing_goal", "stack_top stack_bottom parsed_word task")6actr.chunktype("sentence", "word1 word2 word3")7actr.chunktype("word", "form, cat")8parser = actr.ACTRModel()9dm = parser.decmem10g = parser.goal11imaginal = parser.set_goal(name="imaginal", delay=0.2)12dm.add(actr.chunkstring(string="""13 isa word14 form 'Mary'15 cat 'ProperN'16"""))17dm.add(actr.chunkstring(string="""18 isa word19 form 'Bill'20 cat 'ProperN'21"""))22dm.add(actr.chunkstring(string="""23 isa word24 form 'likes'25 cat 'V'26"""))27g.add(actr.chunkstring(string="""28 isa parsing_goal29 task parsing30 stack_top 'S'31"""))32imaginal.add(actr.chunkstring(string="""33 isa sentence34 word1 'Mary'35 word2 'likes'36 word3 'Bill'37"""))38parser.productionstring(name="expand: S ==> NP VP", string="""39 =g>40 isa parsing_goal41 task parsing42 stack_top 'S'43 ==>44 =g>45 isa parsing_goal46 stack_top 'NP'47 stack_bottom 'VP'48""")49parser.productionstring(name="expand: NP ==> ProperN", string="""50 =g>51 isa parsing_goal52 task parsing53 stack_top 'NP'54 ==>55 =g>56 isa parsing_goal57 stack_top 'ProperN'58""")59parser.productionstring(name="expand: VP ==> V NP", string="""60 =g>61 isa parsing_goal62 task parsing63 stack_top 'VP'64 ==>65 =g>66 isa parsing_goal67 stack_top 'V'68 stack_bottom 'NP'69""")70parser.productionstring(name="retrieve: ProperN", string="""71 =g>72 isa parsing_goal73 task parsing74 stack_top 'ProperN'75 =imaginal>76 isa sentence77 word1 =w178 ==>79 =g>80 isa parsing_goal81 task retrieving82 +retrieval>83 isa word84 form =w185""")86parser.productionstring(name="retrieve: V", string="""87 =g>88 isa parsing_goal89 task parsing90 stack_top 'V'91 =imaginal>92 isa sentence93 word1 =w194 ==>95 =g>96 isa parsing_goal97 task retrieving98 +retrieval>99 isa word100 form =w1101""")102parser.productionstring(name="scan: word", string="""103 =g>104 isa parsing_goal105 task retrieving106 stack_top =y107 stack_bottom =x108 =retrieval>109 isa word110 form =w1111 cat =y112 =imaginal>113 isa sentence114 word1 =w1115 word2 =w2116 word3 =w3117 ==>118 =g>119 isa parsing_goal120 task printing121 stack_top =x122 stack_bottom empty123 parsed_word =w1124 =imaginal>125 isa sentence126 word1 =w2127 word2 =w3128 word3 empty129 ~retrieval>130""")131parser.productionstring(name="print parsed word", string="""132 =g>133 isa parsing_goal134 task printing135 =imaginal>136 isa sentence137 word1 ~empty138 ==>139 !g>140 show parsed_word141 =g>142 isa parsing_goal143 task parsing144 parsed_word None145""")146parser.productionstring(name="done", string="""147 =g>148 isa parsing_goal149 task printing150 =imaginal>151 isa sentence152 word1 empty153 ==>154 =g>155 isa parsing_goal156 task done157 !g>158 show parsed_word159 ~imaginal>160 ~g>161""")162if __name__ == "__main__":163 parser_sim = parser.simulation()164 parser_sim.run()165 print("\nDeclarative memory at the end of the simulation:")...


stack.py

Source: stack.py (GitHub)


import numpy as np


class CoordinateStack:
    """Fixed-capacity stack of (x, y) coordinates backed by a NumPy array.

    stack_top is the index of the next free slot, i.e. the number of
    coordinate pairs currently stored.
    """

    def __init__(self, size):
        self.stack = np.empty((2, size), dtype=np.int32)
        self.stack_top = 0
        self.size = size

    def push(self, x, y):
        if self.stack_top < self.size:
            self.stack[0, self.stack_top] = x
            self.stack[1, self.stack_top] = y
            self.stack_top += 1
            return True
        else:
            return False

    def pop(self):
        if self.stack_top == 0:
            return None, None
        else:
            # move the top index down first so we read the last pushed pair
            self.stack_top -= 1
            x, y = self.stack[0, self.stack_top], self.stack[1, self.stack_top]
            return x, y

    def __cost(self, a, b):
        # Manhattan distance between the stored points at indexes a and b
        return abs(self.stack[0, a] - self.stack[0, b]) + abs(self.stack[1, a] - self.stack[1, b])

    def __minKey(self, key, included):
        # index of the not-yet-included point closest to `key`
        min_cost = 1000000
        min_index = None
        for v in range(self.stack_top):
            if not included[v] and self.__cost(key, v) < min_cost:
                min_cost = self.__cost(key, v)
                min_index = v
            if min_cost == 1:
                return min_index
        return min_index

    def func(self):
        # greedily reorder the stored points into a nearest-neighbour chain
        print("making func")
        included = np.zeros(self.size, dtype=bool)
        final_func = CoordinateStack(self.stack_top)
        current_pos = 0
        final_func.push(self.stack[0, current_pos], self.stack[1, current_pos])
        included[current_pos] = True
        count = 1
        while count < self.stack_top:
            print(count, "/", self.stack_top, end="\r")
            current_pos = self.__minKey(current_pos, included)
            final_func.push(self.stack[0, current_pos], self.stack[1, current_pos])
            included[current_pos] = True
            count += 1
        print("")
        ...
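A short usage sketch (the coordinate values are made up) showing that stack_top acts as the index of the next free slot in the preallocated array:

# Illustrative values only.
cs = CoordinateStack(size=4)
cs.push(0, 0)
cs.push(3, 1)
cs.push(1, 4)
print(cs.stack_top)   # 3 -> three coordinate pairs are stored
print(cs.pop())       # the most recently pushed pair, (1, 4)
print(cs.stack_top)   # 2 after the pop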


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 automation test minutes free!

Next-Gen App & Browser Testing Cloud
