How to use the execute_actions method in autotest

Best Python code snippets using autotest_python

muti_agent.py

Source: muti_agent.py (GitHub)


...
        time = start
        while time <= end:
            self.EventQueue.put((time, house))
            time += interval

    def execute_actions(self, agent_host, action):
        # agent_host.sendCommand(action)
        # time.sleep(0)
        # world_state = agent_host.getWorldState()
        # for error in world_state.errors:
        #     print("Error:", error.text)
        return

    def face_to(self, agent, face):
        # if self.agent_face[agent] > face:
        #     for i in range(self.agent_face[agent] - face):
        #         self.execute_actions(self.agent_host[agent], "turn -0.8")
        #         time.sleep(0.8)
        #     self.agent_face[agent] = face
        # elif self.agent_face[agent] < face:
        #     for i in range(face - self.agent_face[agent]):
        #         self.execute_actions(self.agent_host[agent], "turn 0.8")
        #         time.sleep(0.8)
        #     self.agent_face[agent] = face
        return

    def move_north(self, agent):
        # self.face_to(agent, 1)
        # for i in range(7):
        #     self.execute_actions(self.agent_host[agent], 'movenorth 1')
        #     time.sleep(0)
        if agent == 0:
            self.Agent_Location = [(self.Agent_Location[0][0], self.Agent_Location[0][1] - 1), self.Agent_Location[1]]
        else:
            self.Agent_Location = [self.Agent_Location[0], (self.Agent_Location[1][0], self.Agent_Location[1][1] - 1)]

    def move_south(self, agent):
        # self.face_to(agent, 3)
        # for i in range(7):
        #     self.execute_actions(self.agent_host[agent], 'movesouth 1')
        #     time.sleep(0)
        if agent == 0:
            self.Agent_Location = [(self.Agent_Location[0][0], self.Agent_Location[0][1] + 1), self.Agent_Location[1]]
        else:
            self.Agent_Location = [self.Agent_Location[0], (self.Agent_Location[1][0], self.Agent_Location[1][1] + 1)]

    def move_east(self, agent):
        # self.face_to(agent, 2)
        # for i in range(7):
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     time.sleep(0)
        if agent == 0:
            self.Agent_Location = [(self.Agent_Location[0][0] + 1, self.Agent_Location[0][1]), self.Agent_Location[1]]
        else:
            self.Agent_Location = [self.Agent_Location[0], (self.Agent_Location[1][0] + 1, self.Agent_Location[1][1])]

    def move_west(self, agent):
        # self.face_to(agent, 4)
        # for i in range(7):
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        #     time.sleep(0)
        if agent == 0:
            self.Agent_Location = [(self.Agent_Location[0][0] - 1, self.Agent_Location[0][1]), self.Agent_Location[1]]
        else:
            self.Agent_Location = [self.Agent_Location[0], (self.Agent_Location[1][0] - 1, self.Agent_Location[1][1])]

    def deliever_food(self, agent):
        # if agent == 0:
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     self.execute_actions(self.agent_host[agent], 'movenorth 1')
        #     time.sleep(0)
        #     loc = self.Agent_Location[agent]
        #     self.execute_actions(self.agent_host[0], "chat /fill " + str(loc[0] * 7 + 2) + " 8 " + str(loc[1] * 7 + 2) + " " + str(loc[0] * 7 + 4) + " 8 " + str(loc[1] * 7 + 3) + " minecraft:air")
        #     self.execute_actions(self.agent_host[agent], 'movesouth 1')
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        # else:
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     self.execute_actions(self.agent_host[agent], 'moveeast 1')
        #     time.sleep(0)
        #     loc = self.Agent_Location[agent]
        #     self.execute_actions(self.agent_host[0], "chat /fill " + str(loc[0] * 7 + 2) + " 8 " + str(loc[1] * 7 + 2) + " " + str(loc[0] * 7 + 4) + " 8 " + str(loc[1] * 7 + 3) + " minecraft:air")
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        #     self.execute_actions(self.agent_host[agent], 'movewest 1')
        return

    def get_path(self, source, dest):
        action = []
        x_axis = dest[0] - source[0]
        y_axis = dest[1] - source[1]
        if x_axis > 0:
            for i in range(x_axis):
                action.append('E')
        elif x_axis < 0:
            for i in range(-x_axis):
                action.append('W')
        if y_axis > 0:
            for i in range(y_axis):
                action.append('S')
        elif y_axis < 0:
            for i in range(-y_axis):
                action.append('N')
        return action

    def extract_action_list_from_path(self, path_list):
        action_trans = {'N': self.move_north, 'S': self.move_south, 'E': self.move_east, 'W': self.move_west}
        alist = []
        for i in path_list:
            alist.append(action_trans[i])
        return alist

    def act(self, agent):
        if self.Agent_Action[agent][0] == None:
            self.Agent_Action[agent].pop(0)
            if Tracing:
                print("Stay")
        else:
            self.Agent_Action[agent][0](agent)
            self.Agent_Action[agent].pop(0)
            if Tracing:
                print("agent", agent, "now is on", self.Agent_Location[agent])
        for i in self.House:
            if i.location == self.Agent_Location[agent] and i.state == True:
                if Tracing:
                    print("************Agent", agent, "Change the house", i.id)
                i.state = False
                self.deliever_food(agent)
                total = 0
                for order in i.orderTime:
                    total += self.Clock - order
                i.orderTime = []
                self.total_waiting += total
                return 1000 - total
        return 0

    def set_toDoAction(self, dest, agent):
        if Tracing:
            print("\t\t\t\tplan to send", agent, "to", dest)
        if dest == self.Agent_Location[agent]:
            self.Agent_Action[agent] = [None]
            if Tracing:
                print("\t\t\t\tWait")
            return
        path = self.get_path(self.Agent_Location[agent], dest)
        extract_action_list = self.extract_action_list_from_path(path)
        if Tracing:
            print("\t\t\t\t", path, sep="")
            print("\t\t\t\t^^^^^^^^^^^")
        self.Agent_Action[agent] = extract_action_list
        return

    def get_possible_actions(self, agent):
        return [i.location for i in self.House]

    def get_curr_state(self):
        return self.Agent_Location.copy() + [i for i in self.House]

    def choose_action(self, curr_state, possible_actions, eps, agent):
        if curr_state not in self.q_table[agent]:
            self.q_table[agent][curr_state] = {}
        for action in possible_actions:
            if action not in self.q_table[agent][curr_state]:
                self.q_table[agent][curr_state][action] = 0
        if len(possible_actions) == 0:
            return None
        actionProb = random.uniform(0.0, 1.0)
        # take a random move
        if actionProb <= eps:
            # actionIndex = random.randint(0, len(possible_actions) - 1)
            return possible_actions[random.randint(0, len(possible_actions) - 1)]
        # otherwise take the greedy action
        else:
            maxQValue = min(self.q_table[agent][curr_state].values()) - 1
            actionList = []
            for (action, qValue) in self.q_table[agent][curr_state].items():
                if qValue > maxQValue:
                    actionList.clear()
                    maxQValue = qValue
                    actionList.append(action)
                elif qValue == maxQValue:
                    actionList.append(action)
            return actionList[random.randint(0, len(actionList) - 1)]

    def transferCurrState(self, state):
        # convert the state into a hashable tuple (active houses become their locations)
        copy_state = []
        for i in state:
            if type(i) == House:
                if i.state == True:
                    copy_state.append(i.location)
            else:
                copy_state.append(i)
        return tuple(copy_state)

    def best_policy(self):
        returnAction = {"0": [], "1": []}
        s0 = self.transferCurrState(self.get_curr_state())
        possible_actions1 = self.get_possible_actions(0)
        possible_actions2 = self.get_possible_actions(1)
        agent1_a0 = self.choose_action(s0, possible_actions1, -1, 0)
        agent2_a0 = self.choose_action(s0, possible_actions2, -1, 1)
        self.set_toDoAction(agent1_a0, 0)
        self.set_toDoAction(agent2_a0, 1)
        returnAction["0"].append(agent1_a0)
        returnAction["1"].append(agent2_a0)
        done_update = False
        while not done_update:
            count = 0
            if self.EventQueue.qsize() != 0:
                for i in self.EventQueue.queue:
                    if i[0] == self.Clock:
                        count += 1
            for i in range(count):
                event_time, house = self.EventQueue.get()
                house.state = True
                house.orderTime.append(event_time)
                loc = house.location
                self.execute_actions(self.agent_host[0], "chat /fill " + str(loc[0] * 7 + 2) + " 8 " + str(loc[1] * 7 + 2) + " " + str(loc[0] * 7 + 4) + " 8 " + str(loc[1] * 7 + 3) + " minecraft:gold_block")
            if Tracing:
                print("----------------------------------------------------")
                print("Now", self.Clock)
                print("Houses:")
                for i in self.House:
                    print("|", i.location, i.state)
            for agent in [0, 1]:
                current_r = self.act(agent)
                if self.Agent_Action[agent] == []:
                    if (self.EventQueue.qsize() == 0 and [i.state for i in self.House] == [False for i in self.House]):
                        if agent == 0:
                            returnAction["1"].pop(-1)
                        return returnAction
                    else:
                        s = self.transferCurrState(self.get_curr_state())
                        possible_actions = self.get_possible_actions(agent)
                        next_a = self.choose_action(s, possible_actions, -1, agent)
                        self.set_toDoAction(next_a, agent)
                        returnAction[str(agent)].append(next_a)
            self.Clock += 1
        return returnAction

    def update_q_table(self, tau, S, A, R, T, agent):
        # n-step variant, kept for reference:
        # curr_s = S.popleft()
        # curr_a = A.popleft()
        # curr_r = R.popleft()
        # G = sum([self.gamma ** i * R[i] for i in range(len(S))])
        # if tau + self.n < T:
        #     G += self.gamma ** self.n * self.q_table[agent][S[-1]][A[-1]]
        # old_q = self.q_table[agent][curr_s][curr_a]
        # self.q_table[agent][curr_s][curr_a] = old_q + self.alpha * (G - old_q)
        curr_s = S.popleft()
        curr_a = A.popleft()
        R.popleft()
        if len(S) != 0:
            nextMaxReward = max(self.q_table[agent][S[0]].values())
        else:
            nextMaxReward = 0
        if len(S) != 0:
            self.q_table[agent][curr_s][curr_a] = self.q_table[agent][curr_s][curr_a] + self.alpha * (R[0] + self.gamma * nextMaxReward)
        else:
            self.q_table[agent][curr_s][curr_a] = self.q_table[agent][curr_s][curr_a] + self.alpha * (self.gamma * nextMaxReward)

    # def update_q_table(self, tau, S, A, R, T, agent):
    #     curr_s = S.popleft()
    #     curr_a = A.popleft()
    #     R.popleft()
    #     nextMaxReward = max(self.q_table[agent][S[0]].values())
    #     self.q_table[agent][curr_s][curr_a] = self.q_table[agent][curr_s][curr_a] + self.alpha * (R[0] + self.gamma * nextMaxReward - self.q_table[agent][curr_s][curr_a])

    def run(self):
        S = [deque(), deque()]
        A = [deque(), deque()]
        R = [deque(), deque()]
        present_reward = 0
        done_update = False
        while not done_update:
            s0 = self.transferCurrState(self.get_curr_state())
            possible_actions1 = self.get_possible_actions(0)
            possible_actions2 = self.get_possible_actions(1)
            agent1_a0 = self.choose_action(s0, possible_actions1, self.epsilon, 0)
            agent2_a0 = self.choose_action(s0, possible_actions2, self.epsilon, 1)
            self.set_toDoAction(agent1_a0, 0)
            self.set_toDoAction(agent2_a0, 1)
            S[0].append(s0)
            S[1].append(s0)
            A[0].append(agent1_a0)
            A[1].append(agent2_a0)
            R[0].append(0)
            R[1].append(0)
            T = sys.maxsize
            for t in range(sys.maxsize):
                time.sleep(0.1)
                # eventQueue update
                count = 0
                if self.EventQueue.qsize() != 0:
                    for i in self.EventQueue.queue:
                        if i[0] == self.Clock:
                            count += 1
                for i in range(count):
                    event_time, house = self.EventQueue.get()
                    house.state = True
                    house.orderTime.append(event_time)
                    loc = house.location
                    self.execute_actions(self.agent_host[0], "chat /fill " + str(loc[0] * 7 + 2) + " 8 " + str(loc[1] * 7 + 2) + " " + str(loc[0] * 7 + 4) + " 8 " + str(loc[1] * 7 + 3) + " minecraft:gold_block")
                # -----------------------
                if Tracing:
                    print("----------------------------------------------------")
                    print("Now", self.Clock)
                    print("Houses:")
                    for i in self.House:
                        print("|", i.location, i.state)
                # -----------------------
                if t < T:
                    # same logic and structure as before, only now there are two agents
                    for agent in [0, 1]:
                        current_r = self.act(agent)
                        # the reward only needs to be computed (and the q-table updated,
                        # the next action chosen) once the agent has reached a house
                        if self.Agent_Action[agent] == []:
                            R[agent].append(current_r)
...
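In this snippet, execute_actions is intentionally a no-op: the Project Malmo calls in its body are commented out, so movement is only simulated through self.Agent_Location. A minimal sketch of a live version would re-enable those calls (the sendCommand/getWorldState usage comes straight from the commented-out body; the 0.1-second pause is an assumption, not the project's tested value):

import time

def execute_actions(self, agent_host, action):
    # forward the command, e.g. 'movenorth 1' or a 'chat /fill ...' command, to Malmo
    agent_host.sendCommand(action)
    time.sleep(0.1)  # assumed short pause so the server can apply the command
    world_state = agent_host.getWorldState()
    for error in world_state.errors:
        print("Error:", error.text)

Keeping the stub version instead makes the class a pure simulation, which is what lets best_policy() and run() iterate quickly without a Minecraft server attached.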


evaluate_dagger.py

Source: evaluate_dagger.py (GitHub)


import copy
import numpy as np
import torch
import os
import sys
sys.path.insert(0, os.environ['ALFWORLD_ROOT'])
from agents.utils.misc import extract_admissible_commands

def evaluate_dagger(env, agent, num_games, debug=False):
    env.seed(42)
    agent.eval()
    episode_no = 0
    res_points, res_steps, res_gcs = [], [], []
    res_info = []
    final_dynamics = {}
    with torch.no_grad():
        while True:
            if episode_no >= num_games:
                break
            obs, infos = env.reset()
            game_names = infos["extra.gamefile"]
            batch_size = len(obs)
            if agent.unstick_by_beam_search:
                smart = [{"not working": [], "to try": []} for _ in range(batch_size)]
            agent.init(batch_size)
            previous_dynamics = None
            execute_actions = []
            prev_step_dones, prev_rewards = [], []
            for _ in range(batch_size):
                execute_actions.append("restart")
                prev_step_dones.append(0.0)
                prev_rewards.append(0.0)
            observation_strings = list(obs)
            task_desc_strings, observation_strings = agent.get_task_and_obs(observation_strings)
            task_desc_strings = agent.preprocess_task(task_desc_strings)
            observation_strings = agent.preprocess_observation(observation_strings)
            first_sight_strings = copy.deepcopy(observation_strings)
            agent.observation_pool.push_first_sight(first_sight_strings)
            if agent.action_space == "exhaustive":
                action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
            else:
                action_candidate_list = list(infos["admissible_commands"])
            action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
            observation_strings = [item + " [SEP] " + a for item, a in zip(observation_strings, execute_actions)]  # appending the action chosen at the previous step into the observation
            still_running_mask = []
            sequence_game_points = []
            goal_condition_points = []
            print_actions = []
            report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= (episode_no - batch_size) % agent.report_frequency)
            if debug:
                print(first_sight_strings[0])
                print(task_desc_strings[0])
            for step_no in range(agent.max_nb_steps_per_episode):
                # push obs into observation pool
                agent.observation_pool.push_batch(observation_strings)
                # get most recent k observations
                most_recent_observation_strings = agent.observation_pool.get()
                # predict actions
                if agent.action_space == "generation":
                    # heuristically unstick the agent from generating the same thing over and over again
                    prev_actions = copy.copy(execute_actions)
                    if agent.unstick_by_beam_search:
                        for i in range(batch_size):
                            if "Nothing happens" in observation_strings[i]:
                                smart[i]["not working"].append(execute_actions[i])
                    execute_actions, current_dynamics = agent.command_generation_greedy_generation(most_recent_observation_strings, task_desc_strings, previous_dynamics)
                    # heuristically unstick the agent from generating the same thing over and over again
                    if agent.unstick_by_beam_search:
                        for i in range(batch_size):
                            if "Nothing happens" in observation_strings[i] and execute_actions[i] in smart[i]["not working"]:
                                if len(smart[i]["to try"]) == 0:
                                    bs_actions, _ = agent.command_generation_beam_search_generation(most_recent_observation_strings[i: i + 1], task_desc_strings[i: i + 1], None if previous_dynamics is None else previous_dynamics[i: i + 1])
                                    bs_actions = bs_actions[0]
                                    smart[i]["to try"] += bs_actions
                                    smart[i]["to try"] = [item for item in smart[i]["to try"] if item != prev_actions[i]]
                                if len(smart[i]["to try"]) > 0:
                                    execute_actions[i] = smart[i]["to try"][0]
                                else:
                                    smart[i] = {"not working": [], "to try": []}  # reset
                elif agent.action_space in ["admissible", "exhaustive"]:
                    execute_actions, _, current_dynamics = agent.admissible_commands_greedy_generation(most_recent_observation_strings, task_desc_strings, action_candidate_list, previous_dynamics)
                else:
                    raise NotImplementedError()
                obs, _, dones, infos = env.step(execute_actions)
                scores = [float(item) for item in infos["won"]]
                gcs = [float(item) for item in infos["goal_condition_success_rate"]] if "goal_condition_success_rate" in infos else [0.0] * batch_size
                dones = [float(item) for item in dones]
                if debug:
                    print(execute_actions[0])
                    print(obs[0])
                observation_strings = list(obs)
                observation_strings = agent.preprocess_observation(observation_strings)
                if agent.action_space == "exhaustive":
                    action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
                else:
                    action_candidate_list = list(infos["admissible_commands"])
                action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
                observation_strings = [item + " [SEP] " + a for item, a in zip(observation_strings, execute_actions)]  # appending the action chosen at the previous step into the observation
                previous_dynamics = current_dynamics
                if step_no == agent.max_nb_steps_per_episode - 1:
                    # terminate the game because DQN requires one extra step
                    dones = [1.0 for _ in dones]
                still_running = [1.0 - float(item) for item in prev_step_dones]  # list of float
                prev_step_dones = dones
                step_rewards = [float(curr) - float(prev) for curr, prev in zip(scores, prev_rewards)]  # list of float
                prev_rewards = scores
                sequence_game_points.append(step_rewards)
                goal_condition_points.append(gcs)
                still_running_mask.append(still_running)
                print_actions.append(execute_actions[0] if still_running[0] else "--")
                # if all ended, break
                if np.sum(still_running) == 0:
                    break
            try:
                for n in range(batch_size):
                    Thor = env.envs[n]
                    task_name = Thor.env.save_frames_path
                    final_dynamics[task_name] = {}
                    final_dynamics[task_name]["final_dynamics"] = current_dynamics[n].to('cpu').numpy()
                    final_dynamics[task_name]["label"] = Thor.traj_data['task_type']
            except Exception as e:
                pass
            game_steps = np.sum(np.array(still_running_mask), 0).tolist()  # batch
            game_points = np.max(np.array(sequence_game_points), 0).tolist()  # batch
            game_gcs = np.max(np.array(goal_condition_points), 0).tolist()  # batch
            for i in range(batch_size):
                if len(res_points) >= num_games:
                    break
                res_points.append(game_points[i])
                res_gcs.append(game_gcs[i])
                res_steps.append(game_steps[i])
                res_info.append("/".join(game_names[i].split("/")[-3:-1]) + ", score: " + str(game_points[i]) + ", step: " + str(game_steps[i]))
            # finish game
            agent.finish_of_episode(episode_no, batch_size)
            episode_no += batch_size
            if not report:
                continue
            print("Model: {:s} | Episode: {:3d} | {:s} | game points: {:2.3f} | game goal-condition points: {:2.3f} | game steps: {:2.3f}".format(agent.experiment_tag, episode_no, game_names[0], np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)))
            # print(game_id + ": " + " | ".join(print_actions))
            print(" | ".join(print_actions))
    average_points, average_gc_points, average_steps = np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)
    print("================================================")
    print("eval game points: " + str(average_points) + ", eval game goal-condition points: " + str(average_gc_points) + ", eval game steps: " + str(average_steps))
    for item in res_info:
        print(item)
    return {
        'average_points': average_points,
        'average_goal_condition_points': average_gc_points,
        'average_steps': average_steps,
        'res_points': res_points,
        'res_gcs': res_gcs,
        'res_steps': res_steps,
        'res_info': res_info
...
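Here execute_actions is simply the batched list of commands handed to env.step on every turn, and evaluate_dagger aggregates everything into its return dict. A hypothetical call site, since constructing the ALFWorld environment and DAgger agent is project-specific and omitted here (num_games=10 is an arbitrary choice):

# hedged usage sketch: `env` is assumed to be a batched ALFWorld-style
# environment and `agent` a DAgger agent exposing the methods used above
results = evaluate_dagger(env, agent, num_games=10, debug=True)
print("points:", results['average_points'])
print("goal-condition points:", results['average_goal_condition_points'])
print("steps:", results['average_steps'])
for line in results['res_info']:
    print(line)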


navigator.py

Source: navigator.py (GitHub)


from __future__ import annotations
from typing import Optional, Union
from dataclasses import dataclass
from src.svc import common
from src.svc.common import error
from src.svc.common.states import State, SPACE_LITERAL, tree

@dataclass
class Navigator:
    """
    ## Traces the user's path between states
    so they can use the `← Back` button
    """
    trace: list[State]
    """
    # Example:
    ```notpython
    [ Settings.I_MAIN, Settings.II_GROUP, Settings.I_BROADCAST ]
      ^                                   ^
      where started                       current state
    ```
    """
    back_trace: list[State]
    """
    # The current state moves here when you press the `Back` button
    - so you can use the `Next` button
    """
    ignored: set[State]
    """
    # States the user is not supposed to get to
    """
    everything: Optional[common.CommonEverything]
    """
    # Last received event
    ## Used
    - to pass it to the `on_enter`, `on_exit` methods of states
    """

    @classmethod
    def default(cls: type[Navigator]):
        return cls(
            trace = [tree.Init.I_MAIN],
            back_trace = [],
            ignored = set(),
            everything = None
        )

    @property
    def current(self) -> Optional[State]:
        """
        ## Last state from `trace`
        """
        if len(self.trace) < 1:
            return None
        return self.trace[-1]

    @property
    def first(self) -> Optional[State]:
        if len(self.trace) < 1:
            return None
        return self.trace[0]

    @property
    def current_back_trace(self) -> Optional[State]:
        """
        ## Last state from `back_trace`
        """
        if len(self.back_trace) < 1:
            return None
        return self.back_trace[-1]

    @property
    def space(self) -> SPACE_LITERAL:
        """
        ## Get the space of the current state
        """
        return self.current.space

    @property
    def previous_space(self) -> SPACE_LITERAL:
        for state in reversed(self.trace):
            if state.space != self.space:
                return state.space
        return self.space

    def append(self, state: State):
        """ ## Add a state to the trace """
        if state in self.ignored:
            raise error.GoingToIgnoredState(
                f"appending {state.anchor} that is ignored: {self.ignored}"
            )
        if self.current is state:
            raise error.DuplicateState(
                f"appending {state.anchor} that was the current one"
            )
        if self.current_back_trace is state:
            return self.next()
        self.append_no_checks(state)

    def append_no_checks(self, state: State):
        # we exit the current state,
        # but it remains in the trace
        # and we can go back to it,
        # so we call `on_traced_exit`
        if self.current:
            self.current.on_traced_exit(self.everything)
        self.trace.append(state)
        # we entered a state
        # for the first time,
        # it wasn't in the trace before,
        # so we call `on_enter`
        self.current.on_enter(self.everything)

    def back(
        self,
        trace_it: bool = True,
        execute_actions: bool = True
    ):
        """
        # Remove the last state from the current space
        ## Params
        - `trace_it` - whether the current state
        should be appended to `back_trace`
        - `execute_actions` - we can assign
        actions to run when we enter/exit
        a specific state; this parameter
        tells whether we should execute them
        """
        if len(self.trace) < 2:
            return None
        if trace_it and self.trace[-1].back_trace:
            self.back_trace.append(self.current)
        if execute_actions:
            # this state won't be in the trace
            # anymore, so we call `on_exit`
            self.current.on_exit(self.everything)
        del self.trace[-1]
        if execute_actions:
            # the state we just got to was
            # in the trace, so we call `on_traced_enter`
            self.current.on_traced_enter(self.everything)

    def next(self):
        if len(self.back_trace) > 0:
            self.append_no_checks(self.current_back_trace)
            del self.back_trace[-1]

    def delete(self, state: State) -> bool:
        for (i, traced_state) in enumerate(self.trace):
            if traced_state == state:
                # eject the state and fire its deletion hook
                traced_state.on_delete(self.everything)
                del self.trace[i]
                return True
        return False

    def delete_back_trace(self, state: State) -> bool:
        for (i, traced_state) in enumerate(self.back_trace):
            if traced_state == state:
                # eject the state and fire its deletion hook
                traced_state.on_delete(self.everything)
                del self.back_trace[i]
                return True
        return False

    @property
    def spaces(self) -> set[SPACE_LITERAL]:
        unique_spaces: set[SPACE_LITERAL] = set()
        for state in self.trace:
            unique_spaces.add(state.space)
        return unique_spaces

    def is_space_mixed(self) -> bool:
        return len(self.spaces) > 1

    def jump_back_to(
        self,
        state: State,
        trace_it: bool = False,
        execute_actions: bool = True
    ):
        if state not in self.trace:
            raise error.ThisStateNotInTrace(
                "you tried to jump back to a state "
                "that is not in the trace"
            )
        while self.current != state:
            self.back(
                trace_it = trace_it,
                execute_actions = execute_actions
            )

    def space_jump_back(self, trace_it: bool = False):
        if not self.is_space_mixed():
            raise error.NotSpaceMixed(
                "jumping back from the current space to another isn't possible, "
                "'cause there's only one type of space in the trace"
            )
        initial_space = self.space
        while initial_space == self.space:
            self.back(trace_it = trace_it)

    def jump_back_to_or_append(self, state: State, trace_it: bool = False):
        try:
            self.jump_back_to(state, trace_it = trace_it)
        except error.ThisStateNotInTrace:
            self.append(state)

    def clear(self) -> None:
        self.trace = []
        self.back_trace = []
        self.ignored = set()

    def auto_ignored(self):
        if tree.Init.I_MAIN in self.spaces:
            self.ignored.add(tree.Settings.I_MAIN)

        if tree.Settings.I_MAIN in self.spaces:
            ...

        if not self.everything.is_group_chat:
...
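In this class, execute_actions is a flag on back() and jump_back_to() that controls whether the on_exit/on_traced_enter hooks fire while the trace unwinds. A small usage sketch, assuming the tree states named in the trace example above (which states exist, and whether a given state allows back-tracing, depend on the project's state tree):

nav = Navigator.default()               # trace: [tree.Init.I_MAIN]
nav.append(tree.Settings.II_GROUP)      # fires on_enter
nav.append(tree.Settings.I_BROADCAST)   # fires on_enter
nav.back()                              # fires on_exit; may push to back_trace
nav.next()                              # re-enters I_BROADCAST from back_trace
nav.jump_back_to(tree.Settings.II_GROUP,
                 execute_actions=False) # unwinds silently, skipping the hooks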


Automation Testing Tutorials

Learn to run automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
