How to use the _prepare_environments method in lisa

Best Python code snippets using lisa_python

lisa_runner.py

Source: lisa_runner.py (GitHub)


...
            for env in self.environments
        )
        return is_all_results_completed and is_all_environment_completed

    def fetch_task(self) -> Optional[Task[None]]:
        self._prepare_environments()
        self._cleanup_deleted_environments()
        self._cleanup_done_results()
        # sort environments by status
        available_environments = self._sort_environments(self.environments)
        available_results = self._sort_test_results(
            [x for x in self.test_results if x.can_run]
        )
        # check deletable environments
        delete_task = self._delete_unused_environments()
        if delete_task:
            return delete_task
        if available_results and available_environments:
            for priority in range(6):
                can_run_results = self._get_results_by_priority(
                    available_results, priority
                )
                if not can_run_results:
                    continue
                # there are test cases and environments, so a task needs to
                # be scheduled.
                for environment in available_environments:
                    if environment.is_in_use:
                        # skip environments that are in use
                        continue
                    # try to pick the designated test result.
                    environment_results = [
                        x
                        for x in can_run_results
                        if environment.source_test_result
                        and x.id_ == environment.source_test_result.id_
                    ]
                    if not environment_results:
                        environment_results = self._get_runnable_test_results(
                            test_results=can_run_results, environment=environment
                        )
                    if not environment_results:
                        continue
                    task = self._dispatch_test_result(
                        environment=environment, test_results=environment_results
                    )
                    # there are more conditions to check. If some condition isn't
                    # met, the task is None; in that case, don't return, and try
                    # the next condition or skip this test case.
                    if task:
                        return task
                if not any(
                    x.is_in_use or x.status == EnvironmentStatus.New
                    for x in available_environments
                ):
                    # if no environment is in use or new, and the results don't
                    # fit any environment, those results cannot be run.
                    self._skip_test_results(can_run_results)
        elif available_results:
            # no available environments, so mark all test results skipped.
            self._skip_test_results(available_results)
        self.status = ActionStatus.SUCCESS
        return None

    def close(self) -> None:
        if hasattr(self, "environments") and self.environments:
            for environment in self.environments:
                self._delete_environment_task(environment, [])
        super().close()

    def _dispatch_test_result(
        self, environment: Environment, test_results: List[TestResult]
    ) -> Optional[Task[None]]:
        check_cancelled()
        assert test_results
        can_run_results = test_results
        # deploy
        if environment.status == EnvironmentStatus.Prepared and can_run_results:
            return self._generate_task(
                task_method=self._deploy_environment_task,
                environment=environment,
                test_results=can_run_results[:1],
            )
        # run on a deployed environment
        can_run_results = [x for x in can_run_results if x.can_run]
        if environment.status == EnvironmentStatus.Deployed and can_run_results:
            selected_test_results = self._get_test_results_to_run(
                test_results=test_results, environment=environment
            )
            if selected_test_results:
                return self._generate_task(
                    task_method=self._run_test_task,
                    environment=environment,
                    test_results=selected_test_results,
                    case_variables=self._case_variables,
                )
        # check if there is a case to run in a connected environment. If so,
        # initialize the environment
        initialization_results = self._get_runnable_test_results(
            test_results=test_results,
            environment_status=EnvironmentStatus.Connected,
            environment=environment,
        )
        if initialization_results:
            return self._generate_task(
                task_method=self._initialize_environment_task,
                environment=environment,
                test_results=initialization_results,
            )
        # run on a connected environment
        can_run_results = [x for x in can_run_results if x.can_run]
        if environment.status == EnvironmentStatus.Connected and can_run_results:
            selected_test_results = self._get_test_results_to_run(
                test_results=test_results, environment=environment
            )
            if selected_test_results:
                return self._generate_task(
                    task_method=self._run_test_task,
                    environment=environment,
                    test_results=selected_test_results,
                    case_variables=self._case_variables,
                )
        return None

    def _delete_unused_environments(self) -> Optional[Task[None]]:
        available_environments = self._sort_environments(self.environments)
        # check deletable environments
        for environment in available_environments:
            # if an environment is in use, or not deployed, it won't be
            # deleted until the end of the runner.
            if environment.is_in_use or environment.status in [
                EnvironmentStatus.New,
                EnvironmentStatus.Prepared,
            ]:
                continue
            can_run_results = self._get_runnable_test_results(
                self.test_results, environment=environment
            )
            if not can_run_results:
                # no more tests need this environment, delete it.
                self._log.debug(
                    f"generating delete environment task on '{environment.name}'"
                )
                return self._generate_task(
                    task_method=self._delete_environment_task,
                    environment=environment,
                    test_results=[],
                )
        return None

    def _prepare_environments(self) -> None:
        if all(x.status != EnvironmentStatus.New for x in self.environments):
            return
        proceeded_environments: List[Environment] = []
        for candidate_environment in self.environments:
            success = True
            if candidate_environment.status == EnvironmentStatus.New:
                success = self._prepare_environment(candidate_environment)
            if success:
                proceeded_environments.append(candidate_environment)
        # sort by environment source and cost
        # user-defined environments should have higher priority than the
        # test cases' requirements
        proceeded_environments.sort(key=lambda x: (not x.is_predefined, x.cost))
        self.environments = proceeded_environments

    def _deploy_environment_task(...
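In LisaRunner, `_prepare_environments` is invoked at the top of `fetch_task`: it prepares every environment still in `EnvironmentStatus.New`, drops any environment whose preparation fails, and re-sorts the survivors so user-defined (predefined) environments come before ones generated from test requirements, cheapest first. Below is a minimal, self-contained sketch of that filter-and-sort step; the `Env` dataclass, the two-value status enum, and the `prepare` stub are simplified stand-ins for LISA's real `Environment` class and platform preparation, not the actual LISA API.

# A minimal sketch of the prepare-then-sort logic in LisaRunner._prepare_environments.
# Env and prepare() are simplified stand-ins, not LISA's real classes.
from dataclasses import dataclass
from enum import Enum
from typing import List


class EnvironmentStatus(Enum):
    New = "New"
    Prepared = "Prepared"


@dataclass
class Env:
    name: str
    is_predefined: bool  # True if the user defined it in the runbook
    cost: int            # relative cost estimate assigned during preparation
    status: EnvironmentStatus = EnvironmentStatus.New


def prepare(env: Env) -> bool:
    # Stand-in for self._prepare_environment(...): would return False if no
    # platform can satisfy the environment's requirements.
    env.status = EnvironmentStatus.Prepared
    return True


def prepare_environments(environments: List[Env]) -> List[Env]:
    if all(e.status != EnvironmentStatus.New for e in environments):
        return environments  # nothing left to prepare
    proceeded: List[Env] = []
    for candidate in environments:
        success = True
        if candidate.status == EnvironmentStatus.New:
            success = prepare(candidate)
        if success:
            proceeded.append(candidate)  # failed environments are dropped
    # User-defined environments first, then ascending cost within each group.
    proceeded.sort(key=lambda e: (not e.is_predefined, e.cost))
    return proceeded


envs = [Env("generated", False, 2), Env("runbook", True, 5)]
print([e.name for e in prepare_environments(envs)])  # ['runbook', 'generated']

The sort key `(not x.is_predefined, x.cost)` works because `False` sorts before `True`, so predefined environments always lead; within each group, lower-cost environments are tried earlier.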


eta_x.py

Source: eta_x.py (GitHub)


...
            path_results=self.config.path_results,
            path_scenarios=self.config.path_scenarios,
        )
        self.config_run.create_results_folders()
        self._prepare_environments(training)
        self._prepare_model(reset)
        log.info("Run prepared successfully.")

    def _prepare_model(self, reset: bool = False) -> None:
        """Check for existing model and load it or back it up and create a new model.

        :param reset: Flag to determine whether an existing model should be reset.
        """
        assert self.config_run is not None, (
            "Set the config_run attribute before trying to initialize the model "
            "(for example by calling prepare_run)."
        )
        assert self.environments is not None, (
            "Initialize the environments before trying to initialize the model "
            "(for example by calling prepare_run)."
        )
        path_model = self.config_run.path_run_model
        if path_model.is_file() and reset:
            log.info(f"Existing model detected: {path_model}")
            bak_name = path_model / f"_{datetime.fromtimestamp(path_model.stat().st_mtime).strftime('%Y%m%d_%H%M')}.bak"
            path_model.rename(bak_name)
            log.info(f"Reset is active. Existing model will be backed up. Backup file name: {bak_name}")
        elif path_model.is_file():
            log.info(f"Existing model detected: {path_model}. Loading model.")
            self.model = load_model(
                self.config.setup.agent_class,
                self.environments,
                self.config.settings.agent,
                self.config_run.path_run_model,
                tensorboard_log=self.config.setup.tensorboard_log,
                log_path=self.config_run.path_series_results,
            )
            return
        # Initialize the model if it wasn't loaded from a file
        self.model = initialize_model(
            self.config.setup.agent_class,
            self.config.setup.policy_class,
            self.environments,
            self.config.settings.agent,
            self.config.settings.seed,
            tensorboard_log=self.config.setup.tensorboard_log,
            log_path=self.config_run.path_series_results,
        )

    def _prepare_environments(self, training: bool = True) -> None:
        """Vectorize and prepare the environments and potentially the interaction environments.

        :param training: Should preparation be done for training (alternative: playing)?
        """
        assert self.config_run is not None, (
            "Set the config_run attribute before trying to initialize the environments "
            "(for example by calling prepare_run)."
        )
        env_class = self.config.setup.environment_class
        self.config_run.set_env_info(env_class)
        legacy_signature = {
            "env_id",
            "run_name",
            "general_settings",
            "path_settings",
...
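In eta_x, `_prepare_environments` is not meant to be called on its own: `prepare_run` sets up `config_run`, then calls `_prepare_environments(training)` followed by `_prepare_model(reset)`, and both helpers guard that ordering with assertions. Here is a minimal sketch of the ordering contract; the `ETAx` stand-in below is heavily trimmed (the real class takes a full configuration object and vectorizes gym environments), so treat its attribute bodies as illustrative only.

# A minimal sketch of the prepare_run -> _prepare_environments -> _prepare_model
# ordering used in eta_x. The stand-in bodies below are simplified placeholders.
from typing import Any, Optional


class ETAx:
    def __init__(self) -> None:
        self.config_run: Optional[Any] = None
        self.environments: Optional[Any] = None
        self.model: Optional[Any] = None

    def prepare_run(self, training: bool = True, reset: bool = False) -> None:
        self.config_run = object()           # stand-in for run configuration setup
        self._prepare_environments(training)
        self._prepare_model(reset)

    def _prepare_environments(self, training: bool = True) -> None:
        assert self.config_run is not None, (
            "Set the config_run attribute before trying to initialize the "
            "environments (for example by calling prepare_run)."
        )
        self.environments = ["vectorized-env"]  # stand-in for env vectorization

    def _prepare_model(self, reset: bool = False) -> None:
        assert self.config_run is not None and self.environments is not None
        self.model = ("agent", self.environments)  # stand-in for load/initialize


eta = ETAx()
eta.prepare_run(training=True)   # the supported entry point
# eta._prepare_environments()    # calling this first would trip the assertion

Pointing the assertion messages at `prepare_run` makes the failure mode self-explanatory when a caller tries to use the private helpers directly.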


AbstractDatasetCreator.py

Source: AbstractDatasetCreator.py (GitHub)


...
        self.track_offsets = track_offsets
        self.remove_type_annotations = remove_type_annotations
        self.recompute_l2g = recompute_l2g
        self.path = path
        self._prepare_environments()
        self._init_cache()

    def _init_cache(self):
        # TODO this is wrong, use standard utilities
        rnd_name = get_random_name()
        self.tmp_dir = os.path.join(tempfile.gettempdir(), rnd_name)
        if os.path.isdir(self.tmp_dir):
            shutil.rmtree(self.tmp_dir)
        os.mkdir(self.tmp_dir)
        self.local2global_cache_filename = os.path.join(self.tmp_dir, "local2global_cache.db")
        self.local2global_cache = shelve.open(self.local2global_cache_filename)

    def __del__(self):
        self.local2global_cache.close()
        shutil.rmtree(self.tmp_dir)
        # os.remove(self.local2global_cache_filename)  # TODO nofile on linux, need to check

    def handle_parallel_edges(self, edges_path):
        logging.info("Handle parallel edges")
        last_id = 0
        global_edge_types = set(special_mapping.keys()) | set(special_mapping.values())
        existing_global_edges = set()
        temp_edges = join(os.path.dirname(edges_path), "temp_" + os.path.basename(edges_path))
        for ind, edges in enumerate(read_edges(edges_path, as_chunks=True)):
            edges["id"] = range(last_id, len(edges) + last_id)
            edge_bank = defaultdict(list)
            ids_to_remove = set()
            for id_, type_, src, dst in edges[["id", "type", "source_node_id", "target_node_id"]].values:
                if type_ in global_edge_types:
                    global_edge = (type_, src, dst)
                    if global_edge not in existing_global_edges:
                        existing_global_edges.add(global_edge)
                    else:
                        ids_to_remove.add(id_)
                else:
                    edge_bank[(src, dst)].append((id_, type_))
            for key, parallel_edges in edge_bank.items():
                if len(parallel_edges) > 1:
                    parallel_edges = sorted(parallel_edges, key=lambda x: self.edge_priority.get(x[1], 3))
                    ids_to_remove.update(pe[0] for pe in parallel_edges[1:])
            edges = edges[
                edges["id"].apply(lambda id_: id_ not in ids_to_remove)
            ]
            edges["id"] = range(last_id, len(edges) + last_id)
            last_id = len(edges) + last_id
            kwargs = self.get_writing_mode(temp_edges.endswith("csv"), first_written=ind != 0)
            persist(edges, temp_edges, **kwargs)
        os.remove(edges_path)
        os.rename(temp_edges, edges_path)

    def post_pruning(self, nodes_path, edges_path):
        logging.info("Post pruning")
        restricted_nodes = set()
        for nodes in read_nodes(nodes_path, as_chunks=True):
            restricted_nodes.update(
                nodes[
                    nodes["type"].apply(lambda type_: type_ in self.restricted_in_types)
                ]["id"]
            )
        temp_edges = join(os.path.dirname(edges_path), "temp_" + os.path.basename(edges_path))
        for ind, edges in enumerate(read_edges(edges_path, as_chunks=True)):
            edges = edges[
                edges["type"].apply(lambda type_: type_ not in self.restricted_edges)
            ]
            edges = edges[
                edges["target_node_id"].apply(lambda type_: type_ not in restricted_nodes)
            ]
            kwargs = self.get_writing_mode(temp_edges.endswith("csv"), first_written=ind != 0)
            persist(edges, temp_edges, **kwargs)
        os.remove(edges_path)
        os.rename(temp_edges, edges_path)

    def compact_mapping_for_l2g(self, global_nodes, filename):
        if len(global_nodes) > 0:
            self.update_l2g_file(
                mapping=self.create_compact_mapping(global_nodes), filename=filename
            )

    @staticmethod
    def create_compact_mapping(node_ids):
        return dict(zip(node_ids, range(len(node_ids))))

    def update_l2g_file(self, mapping, filename):
        for env_path in tqdm(self.environments, desc=f"Fixing {filename}"):
            filepath = os.path.join(env_path, filename)
            if not os.path.isfile(filepath):
                continue
            l2g = unpersist(filepath)
            l2g["global_id"] = l2g["global_id"].apply(lambda id_: mapping.get(id_, None))
            persist(l2g, filepath)

    def get_local2global(self, path):
        if path in self.local2global_cache:
            return self.local2global_cache[path]
        else:
            local2global_df = unpersist_if_present(path)
            if local2global_df is None:
                return None
            else:
                local2global = dict(zip(local2global_df['id'], local2global_df['global_id']))
                self.local2global_cache[path] = local2global
                return local2global

    @staticmethod
    def persist_if_not_none(table, dir, name):
        if table is not None:
            path = os.path.join(dir, name)
            persist(table, path)

    def write_type_annotation_flag(self, edges, output_dir):
        if len(self.type_annotation_edge_types) > 0:
            query_str = " or ".join(f"type == '{edge_type}'" for edge_type in self.type_annotation_edge_types)
            if len(edges.query(query_str)) > 0:
                with open(os.path.join(output_dir, "has_annotations"), "w") as has_annotations:
                    pass

    def write_local(self, dir, local2global=None, local2global_with_ast=None, **kwargs):
        if not self.recompute_l2g:
            for var_name, var_ in kwargs.items():
                self.persist_if_not_none(var_, dir, var_name + ".bz2")
            self.persist_if_not_none(local2global, dir, "local2global.bz2")
            self.persist_if_not_none(local2global_with_ast, dir, "local2global_with_ast.bz2")

    def merge_files(self, env_path, filename, map_filename, columns_to_map, original, columns_special=None):
        input_table_path = join(env_path, filename)
        local2global = self.get_local2global(join(env_path, map_filename))
        if os.path.isfile(input_table_path) and local2global is not None:
            input_table = unpersist(input_table_path)
            if self.only_with_annotations:
                if not os.path.isfile(join(env_path, "has_annotations")):
                    return original
            new_table = map_columns(input_table, local2global, columns_to_map, columns_special=columns_special)
            if original is None:
                return new_table
            else:
                return original.append(new_table)
        else:
            return original

    def read_mapped_local(self, env_path, filename, map_filename, columns_to_map, columns_special=None):
        input_table_path = join(env_path, filename)
        local2global = self.get_local2global(join(env_path, map_filename))
        if os.path.isfile(input_table_path) and local2global is not None:
            if self.only_with_annotations:
                if not os.path.isfile(join(env_path, "has_annotations")):
                    return None
            input_table = unpersist(input_table_path)
            new_table = map_columns(input_table, local2global, columns_to_map, columns_special=columns_special)
            return new_table
        else:
            return None

    def get_writing_mode(self, is_csv, first_written):
        kwargs = {}
        if first_written is True:
            kwargs["mode"] = "a"
            if is_csv:
                kwargs["header"] = False
        return kwargs

    def create_global_file(
            self, local_file, local2global_file, columns, output_path, message, ensure_unique_with=None,
            columns_special=None
    ):
        assert output_path.endswith("json") or output_path.endswith("csv")
        if ensure_unique_with is not None:
            unique_values = set()
        else:
            unique_values = None
        first_written = False
        for ind, env_path in tqdm(
                enumerate(self.environments), desc=message, leave=True,
                dynamic_ncols=True, total=len(self.environments)
        ):
            mapped_local = self.read_mapped_local(
                env_path, local_file, local2global_file, columns, columns_special=columns_special
            )
            if mapped_local is not None:
                if unique_values is not None:
                    unique_verify = list(zip(*(mapped_local[col_name] for col_name in ensure_unique_with)))
                    mapped_local = mapped_local.loc[
                        map(lambda x: x not in unique_values, unique_verify)
                    ]
                    unique_values.update(unique_verify)
                kwargs = self.get_writing_mode(output_path.endswith("csv"), first_written)
                persist(mapped_local, output_path, **kwargs)
                first_written = True

    # def create_global_file(
    #         self, local_file, local2global_file, columns, output_path, message, ensure_unique_with=None,
    #         columns_special=None
    # ):
    #     global_table = None
    #     for ind, env_path in tqdm(
    #             enumerate(self.environments), desc=message, leave=True,
    #             dynamic_ncols=True, total=len(self.environments)
    #     ):
    #         global_table = self.merge_files(
    #             env_path, local_file, local2global_file, columns, global_table, columns_special=columns_special
    #         )
    #
    #     if ensure_unique_with is not None:
    #         global_table = global_table.drop_duplicates(subset=ensure_unique_with)
    #
    #     if global_table is not None:
    #         global_table.reset_index(drop=True, inplace=True)
    #         assert len(global_table) == len(global_table.index.unique())
    #
    #     persist(global_table, output_path)

    def filter_orphaned_nodes(self, nodes_path, edges_path):
        logging.info("Filter orphaned nodes")
        active_nodes = set()
        for edges in read_edges(edges_path, as_chunks=True):
            active_nodes.update(edges['source_node_id'])
            active_nodes.update(edges['target_node_id'])
        temp_nodes = join(os.path.dirname(nodes_path), "temp_" + os.path.basename(nodes_path))
        for ind, nodes in enumerate(read_nodes(nodes_path, as_chunks=True)):
            nodes = nodes[
                nodes['id'].apply(lambda id_: id_ in active_nodes)
            ]
            kwargs = self.get_writing_mode(temp_nodes.endswith("csv"), first_written=ind != 0)
            persist(nodes, temp_nodes, **kwargs)
        os.remove(nodes_path)
        os.rename(temp_nodes, nodes_path)

    def join_files(self, files, local2global_filename, output_dir):
        for file in files:
            params = copy(self.merging_specification[file])
            params["output_path"] = join(output_dir, params.pop("output_path"))
            self.create_global_file(file, local2global_filename, message=f"Merging {file}", **params)

    def merge_graph_without_ast(self, output_path):
        self.join_files(self.files_for_merging, "local2global.bz2", output_path)
        get_path = partial(join, output_path)
        nodes_path = get_path("common_nodes.json")
        edges_path = get_path("common_edges.json")
        self.filter_orphaned_nodes(
            nodes_path,
            edges_path,
        )
        node_names = self.extract_node_names(
            nodes_path, min_count=2
        )
        if node_names is not None:
            persist(node_names, get_path("node_names.json"))
        self.handle_parallel_edges(edges_path)
        if self.visualize:
            self.visualize_func(
                read_nodes(nodes_path),
                read_edges(edges_path),
                get_path("visualization.pdf")
            )

    def merge_graph_with_ast(self, output_path):
        self.join_files(self.files_for_merging_with_ast, "local2global_with_ast.bz2", output_path)
        get_path = partial(join, output_path)
        nodes_path = get_path("common_nodes.json")
        edges_path = get_path("common_edges.json")
        if self.remove_type_annotations:
            self.filter_type_edges(nodes_path, edges_path)
        self.handle_parallel_edges(edges_path)
        self.post_pruning(nodes_path, edges_path)
        self.filter_orphaned_nodes(
            nodes_path,
            edges_path,
        )
        # persist(global_nodes, get_path("common_nodes.json"))
        node_names = self.extract_node_names(
            nodes_path, min_count=2
        )
        if node_names is not None:
            persist(node_names, get_path("node_names.json"))
        if self.visualize:
            self.visualize_func(
                read_nodes(nodes_path),
                read_edges(edges_path),
                get_path("visualization.pdf")
            )

    @abstractmethod
    def create_output_dirs(self, output_path):
        pass

    @abstractmethod
    def _prepare_environments(self):
        pass

    @staticmethod
    @abstractmethod
    def filter_type_edges(nodes, edges):
        pass

    @staticmethod
    @abstractmethod
    def extract_node_names(nodes, min_count):
        pass

    @abstractmethod
    def do_extraction(self):
        pass

    @abstractmethod
    def merge(self, output_directory): ...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

