Best Python code snippets using avocado_python
SignalImplanter.py
Source:SignalImplanter.py  
# Source: SignalImplanter.py — reconstructed from a scrape that fused the
# original file's line numbers into the code text. Truncated fragments (the
# excerpt starts and ends mid-definition) are preserved below as comments
# rather than guessed at.

# -- truncated head (original lines 45-50): tail of a method whose signature is
# -- not visible; it assembled the processed RepertoireDataset:
#        processed_repertoires = SignalImplanter._implant_signals(simulation_state, SignalImplanter._process_repertoire, repertoires_path)
#        processed_dataset = RepertoireDataset(repertoires=processed_repertoires,
#                                              labels={**(simulation_state.dataset.labels if simulation_state.dataset.labels is not None else {}),
#                                                      **{signal.id: [True, False] for signal in simulation_state.signals}},
#                                              name=simulation_state.dataset.name,
#                                              metadata_file=Path(SignalImplanter._create_metadata_file(processed_repertoires, simulation_state)))
#        return processed_dataset


class SignalImplanter:
    # NOTE(review): the class header is not visible in the scraped excerpt; the
    # name is taken from the file name and the internal `SignalImplanter._...`
    # calls below.

    @staticmethod
    def _implant_signals(simulation_state: SimulationState, process_element_func, output_path: Path):
        """Run every dataset element through `process_element_func`, switching
        implantings as their example quotas are used up.

        The active implanting advances when `index` reaches the limit that
        `_prepare_simulation_limits` assigned to it (presumably a cumulative
        example count — confirm against that helper, which is outside this
        excerpt); elements past the last implanting are processed with
        `current_implanting=None`.

        :param simulation_state: carries the dataset, the configured
            implantings, and the signals to simulate
        :param process_element_func: callable(index, element, implanting,
            simulation_state, output_path) -> processed element
        :param output_path: directory passed through to the element processor
        :return: list of processed elements, in dataset order
        """
        processed_elements = []
        simulation_limits = SignalImplanter._prepare_simulation_limits(simulation_state.simulation.implantings,
                                                                       simulation_state.dataset.get_example_count())
        current_implanting_index = 0
        current_implanting = simulation_state.simulation.implantings[current_implanting_index]

        for index, element in enumerate(simulation_state.dataset.get_data()):
            if current_implanting is not None and index >= simulation_limits[current_implanting.name]:
                current_implanting_index += 1
                # len(dict) is equivalent to len(dict.keys()) and idiomatic
                if current_implanting_index < len(simulation_limits):
                    current_implanting = simulation_state.simulation.implantings[current_implanting_index]
                else:
                    # all implanting quotas exhausted: remaining elements get no signal
                    current_implanting = None

            processed_element = process_element_func(index, element, current_implanting, simulation_state, output_path)
            processed_elements.append(processed_element)

        return processed_elements

    @staticmethod
    def _process_receptor(index, receptor, implanting, simulation_state, output_path: Path = None) -> Receptor:
        """Implant the implanting's signals into `receptor`, or clone it
        unchanged when `implanting` is None; then default every simulated
        signal id missing from the receptor metadata to False.

        NOTE(review): on the implanting branch the original receptor object is
        handed to `implant_in_receptor` without cloning first — verify that
        `implant_in_receptor` returns a new object rather than mutating its
        argument.
        """
        if implanting is not None:
            new_receptor = receptor
            for signal in implanting.signals:
                new_receptor = signal.implant_in_receptor(new_receptor, implanting.is_noise)
        else:
            new_receptor = receptor.clone()

        for signal in simulation_state.signals:
            if signal.id not in new_receptor.metadata:
                new_receptor.metadata[signal.id] = False

        return new_receptor

    @staticmethod
    def _process_repertoire(index, repertoire, current_implanting, simulation_state, output_path: Path = None) -> Repertoire:
        """Return an implanted copy of `repertoire` when an implanting is
        active; otherwise rebuild the repertoire with every simulated signal
        label explicitly set to False."""
        if current_implanting is not None:
            return SignalImplanter._implant_in_repertoire(index, repertoire, current_implanting, simulation_state)

        new_metadata = {**repertoire.metadata, **{f"{signal.id}": False for signal in simulation_state.signals}}
        return Repertoire.build_from_sequence_objects(repertoire.sequences, simulation_state.result_path / "repertoires",
                                                      metadata=new_metadata)

    @staticmethod
    def _create_metadata_file(processed_repertoires: List[Repertoire], simulation_state) -> Path:
        """Write metadata.csv (one row per repertoire: its metadata, identifier
        and data filename) under the simulation result path and return the
        file's path.

        Fixed: the original annotated the return type as `str`, but the value
        built here is a `pathlib.Path` (the visible caller even re-wrapped it
        in `Path(...)`).
        """
        path = simulation_state.result_path / "metadata.csv"
        new_df = pd.DataFrame([{**repertoire.metadata, **{'identifier': repertoire.identifier}} for repertoire in processed_repertoires])
        # 'field_list' is dropped before export — presumably internal
        # bookkeeping carried in repertoire.metadata; confirm upstream.
        new_df.drop('field_list', axis=1, inplace=True)
        new_df["filename"] = [repertoire.data_filename.name for repertoire in processed_repertoires]
        new_df.to_csv(path, index=False)
        return path

    # -- truncated tail (original lines 97-104+): `_implant_in_repertoire`
    # -- continues past the scraped excerpt; visible part for reference only:
    #    @staticmethod
    #    def _implant_in_repertoire(index, repertoire, implanting, simulation_state) -> Repertoire:
    #        new_repertoire = copy.deepcopy(repertoire)
    #        for signal in implanting.signals:
    #            new_repertoire = signal.implant_to_repertoire(repertoire=new_repertoire,
    #                                                          repertoire_implanting_rate=implanting.repertoire_implanting_rate,
    #                                                          path=simulation_state.result_path / "repertoires/")
    #        for signal in implanting.signals: ...

# ----------------------------------------------------------------------------
# worker.py
# Source: worker.py — reconstructed from a scrape that fused the original
# file's line numbers into the code text. Truncated fragments are preserved
# as comments rather than guessed at.

# -- truncated head (original lines 75-78): tail of a metadata-reading method
# -- whose signature is not visible:
#        dataset_path = self.dataset_dict.get(dataset)
#        if dataset_path:
#            with open("{}/{}".format(dataset_path, METADATA_FILE_NAME), "r", encoding=ENCODING) as file:
#                return json.load(file)


class _DatasetWorker:
    # NOTE(review): the real class name is not visible in the scraped excerpt;
    # this placeholder only restores a valid enclosing scope for the methods.

    def _create_metadata_file(self, dataset: str, metadata_dict: dict) -> None:
        """Serialize `metadata_dict` as JSON into the dataset's metadata file
        under `self.path`; when the dict is empty/None, log an error instead
        of writing."""
        if metadata_dict:
            with open("{}/{}/{}".format(self.path, dataset, METADATA_FILE_NAME), "w", encoding=ENCODING) as file:
                json.dump(metadata_dict, file)
        else:
            logger.error("metadata for {} isn't found".format(dataset))

    def init_dataset(self, dataset: str, metadata_dict: dict) -> None:
        """Create the on-disk layout for a new dataset — dataset directory,
        data subdirectory, and metadata file — and register the dataset path
        in `self.dataset_dict`. No-op when the dataset already exists
        (per `self.is_exist_dataset`)."""
        if not self.is_exist_dataset(dataset):
            dataset_path = "{}/{}".format(self.path, dataset)
            os.mkdir(dataset_path)
            self.dataset_dict[dataset] = dataset_path
            data_path = "{}/{}".format(dataset_path, DATA_DIR)
            os.mkdir(data_path)
            self._create_metadata_file(dataset, metadata_dict)

    # -- truncated tail (original lines 93-106+): `compress_dataset` continues
    # -- past the scraped excerpt (it validates the metadata via
    # -- `is_correct_metadata`, then reads path/schema/partition_key and builds
    # -- a partition dict); omitted here rather than reconstructed partially.

# ----------------------------------------------------------------------------
# prop_prep.py
# Source: prop_prep.py — reconstructed from a scrape that fused the original
# file's line numbers into the code text. Truncated fragments are preserved
# as comments rather than guessed at.

# -- truncated head (original lines 33-36): tail of a function (signature not
# -- visible) that fills an `attrs` dict with configuration paths:
#    attrs[PROP_CONFIG_PATH_ATTR] = prop_config_path
#    attrs[RANDOM_NETWORK_PROP_CONFIG_PATH_ATTR] = random_network_prop_config_path
#    attrs[RANDOM_NETWORK_CONFIG_PATH] = random_network_conf_path
#    attrs[METADATA_FILE_PATH_ATTR] = metadata_file_path


def _create_metadata_file(attrs: dict):
    """Group interactor ids by viral protein from the prior-set CSV.

    Reads the CSV at attrs[PRIOR_SET_SOURCE_ATTR], indexed by
    attrs[INDEX_COL_ATTR], takes the non-null values of
    attrs[PRIOR_SET_COL_ATTR] cast to int, and collects them (as strings)
    into a {viral_protein: [interactor, ...]} mapping.

    NOTE(review): as scraped, `dict_of_lists` is built but never written or
    returned — the snippet is most likely trimmed by the aggregator site;
    confirm against the original file before relying on this function.
    """
    prior_set_source, index_col, prior_set_col = attrs[PRIOR_SET_SOURCE_ATTR], attrs[INDEX_COL_ATTR], attrs[PRIOR_SET_COL_ATTR]
    interactors_series = pd.read_csv(prior_set_source, index_col=index_col)[prior_set_col].dropna().astype(int)
    dict_of_lists = {}
    # Series.iteritems() was deprecated and removed in pandas 2.0;
    # Series.items() yields the same (index, value) pairs.
    for viral_protein, interactor in interactors_series.items():
        dict_of_lists.setdefault(viral_protein, []).append(str(interactor))

# -- truncated tail (original lines 46-51+): `_copy_and_patch_conf` continues
# -- past the scraped excerpt:
#    def _copy_and_patch_conf(attrs: dict):
#        print("copying and patching conf")
#        conf_to_patch = attrs[CONF_TO_PATCH_ATTR]
#        metadata_file_path = attrs[METADATA_FILE_PATH_ATTR]
#        virus_res_root = attrs[VIRUS_RES_ROOT_ATTR]
#        virus_name = attrs[VIRUS_NAME_ATTR] ...

# ----------------------------------------------------------------------------
# Learn to execute automation testing from scratch with the LambdaTest Learning
# Hub. Right from setting up the prerequisites to running your first automation
# test, to following best practices and diving deeper into advanced test
# scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help
# you become proficient with different test automation frameworks, e.g.
# Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!
