How to use the _generate_runners method in lisa

Best Python code snippet using lisa_python

run_context.py

Source: run_context.py (GitHub)


...
            RunMode.TRAINING: [], RunMode.ROLLOUT: []
        }

        # Restore original CLI arguments and working directory.
        sys.argv = argv

    def _generate_runners(self, run_mode: RunMode) -> List[TrainingRunner]:
        """
        Generates training or rollout runner(s).
        :param run_mode: Run mode. See :py:class:`~maze.maze.api.RunMode`.
        :return: Instantiated runner instances.
        """
        cl = ConfigurationLoader(
            _run_mode=run_mode,
            _kwargs=self._auditors[run_mode].kwargs,
            _overrides=self._auditors[run_mode].overrides,
            _ephemeral_init_kwargs=self._auditors[run_mode].ephemeral_init_kwargs
        )
        cl.load()

        self._workdirs = cl.workdirs
        self._configs[run_mode] = cl.configs
        runners: List[TrainingRunner] = []

        # Change to correct working directory (necessary due to being outside of Hydra scope).
        for workdir, config in zip(self._workdirs, self._configs[run_mode]):
            with working_directory(workdir):
                # Allow non-primitives in Hydra config.
                with omegaconf.flag_override(config, "allow_objects", True) as cfg:
                    # Set up and return runner.
                    runner = Factory(
                        base_type=TrainingRunner if run_mode == RunMode.TRAINING else RolloutRunner
                    ).instantiate(cfg.runner)
                    runner.setup(cfg)
                    runners.append(runner)

        return runners

    def train(self, n_epochs: Optional[int] = None, **train_kwargs) -> None:
        """
        Trains for the specified number of epochs.
        After training, the trainer is reset to the overall best state encountered.
        :param n_epochs: Number of epochs to train for.
        :param train_kwargs: Arguments to pass on to
            :py:meth:`~maze.train.trainers.common.training_runner.TrainingRunner.run`.
        """
        if len(self._runners[RunMode.TRAINING]) == 0:
            self._runners[RunMode.TRAINING] = self._silence(lambda: self._generate_runners(RunMode.TRAINING))

        for i_runner, (workdir, runner) in enumerate(zip(self._workdirs, self._runners[RunMode.TRAINING])):
            with working_directory(workdir):
                self._silence(lambda: runner.run(n_epochs=n_epochs, **train_kwargs))

                # Reset the runner and its policy to their overall best state, if already dumped.
                if os.path.exists(runner.state_dict_dump_file):
                    runner.trainer.load_state(runner.state_dict_dump_file)
                    # Cope with the --multirun setting.
                    policy = self.policy[i_runner] if len(self._runners[RunMode.TRAINING]) > 1 else self.policy
                    policy.load_state_dict(state_dict=runner.trainer.state_dict())

    # To be updated after restructuring of (Rollout) runners.
    # def rollout(
    #     self,
    #     n_episodes: Optional[int] = None,
    #     max_episode_steps: Optional[int] = None,
    #     record_trajectory: Optional[bool] = None,
    #     record_event_logs: Optional[bool] = None,
    #     **rollout_kwargs
    # ) -> None:
    #     """
    #     Rolls out the trainer's policy in the specified environment.
    #     :param n_episodes: Count of episodes to run.
    #     :param max_episode_steps: Count of steps to run in each episode (if the environment returns done,
    #         the episode will be finished earlier though).
    #     :param record_trajectory: Whether to record trajectory data.
    #     :param record_event_logs: Whether to record event logs.
    #     :param rollout_kwargs: Other arguments to pass on to the rollout runner's __init__.
    #     """
    #
    #     assert "policy" not in rollout_kwargs, "Policy must be set at initialization time."
    #
    #     # Execute rollout.
    #     with working_directory(self._workdir):
    #         self._runners[RunMode.ROLLOUT] = self._silence(
    #             lambda: self._generate_runner(
    #                 RunMode.ROLLOUT,
    #                 kwargs=self._rollout_args,
    #                 overrides={
    #                     "hydra.run.dir": self._workdir,
    #                     **self._rollout_overrides,
    #                     **{
    #                         "runner." + key: val for key, val in locals().items()
    #                         if key not in ("self", "rollout_kwargs") and val is not None
    #                     },
    #                     **rollout_kwargs
    #                 }
    #             )
    #         )
    #
    #         self._silence(
    #             lambda: self._runners[RunMode.ROLLOUT].run_with(
    #                 env=self._runners[RunMode.TRAINING]._env_factory(),
    #                 wrappers=[],
    #                 agent=self._runners[RunMode.TRAINING]._model_composer.policy
    #             )
    #         )

    def _silence(self, task: Callable[[], _SilenceReturnType], dynamic: bool = True) -> _SilenceReturnType:
        """
        Suppresses output for execution of a callable.
        :param task: Task to execute.
        :param dynamic: Whether to only silence the task if self._silent is True.
        """
        if (dynamic and self._silent) or not dynamic:
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    return task()
        return task()

    @property
    def config(self) -> Dict[RunMode, Union[Optional[DictConfig], List[DictConfig]]]:
        """
        Returns Hydra DictConfigs specifying the configuration for training and rollout runners.
        :return: Dictionaries with DictConfig(s) for training and rollout each. Note that configurations are
            initialized lazily, i.e. are not available until the first training or rollout is initiated.
        """
        if len(self._workdirs) > 1:
            return self._configs
        return {key: value[0] if len(value) else None for key, value in self._configs.items()}

    @property
    def run_dir(self) -> Union[Optional[str], List[str]]:
        """
        Returns run directory/directories.
        :return: Run directory/directories. Note that run directories are initialized lazily, i.e. are not
            available until the first training or rollout is initiated. If run in single mode, the list is
            collapsed to a single run directory string.
        """
        if len(self._workdirs) > 1:
            return self._workdirs
        return self._workdirs[0] if len(self._workdirs) else None

    @property
    def policy(self) -> Union[TorchPolicy, List[TorchPolicy]]:
        """
        Returns policy/policies.
        :return: Policy/policies used for training and rollout. If run in single mode, the list is collapsed
            to a single Policy instance.
        """
        policies = [runner.model_composer.policy for runner in self._runners[RunMode.TRAINING]]
        return policies if len(policies) > 1 else policies[0]

    def compute_action(
        self,
        observation: ObservationType,
        maze_state: Optional[MazeStateType] = None,
        env: Optional[BaseEnv] = None,
        actor_id: ActorID = None,
        deterministic: bool = False
    ) -> Union[ActionType, List[ActionType]]:
        """
        Computes action(s) with the configured policy/policies.
        This wraps :meth:`maze.core.agent.policy.Policy.compute_action`.
        :return: Computed action(s) for the next step. If run in single mode, the list is collapsed to a
            single action instance.
        """
        actions = [
            runner.model_composer.policy.compute_action(observation, maze_state, env, actor_id, deterministic)
            for runner in self._runners[RunMode.TRAINING]
        ]
        return actions if len(actions) > 1 else actions[0]

    def evaluate(self, **eval_kwargs) -> Union[LogStats, List[LogStats]]:
        """
        Evaluates the trained/loaded policy with a RolloutEvaluator. By default 8 episodes are evaluated
        sequentially.
        :param eval_kwargs: kwargs to overwrite set (or default) initialization parameters for
            RolloutEvaluator. Note that these arguments are ignored if the RolloutEvaluator was passed as an
            instance in AlgorithmConfig.
        :return: Logged statistics. One LogStats object if RunContext doesn't operate in multi-run mode,
            otherwise a list thereof.
        """
        # Collect env factories and policies, wrap them in lists if they aren't already.
        env_factories = self.env_factory
        policies = self.policy
        if not isinstance(env_factories, List):
            env_factories = [env_factories]
            policies = [policies]

        # Generate rollout evaluators.
        rollout_evaluators: List[RolloutEvaluator] = []
        for runner, env_fn in zip(self._runners[RunMode.TRAINING], env_factories):
            # If rollout evaluator is not specified at all, create an incomplete config with target.
            try:
                ro_eval = runner.cfg.algorithm.rollout_evaluator
            except omegaconf.errors.ConfigAttributeError:
                ro_eval = {"_target_": "maze.train.trainers.common.evaluators.rollout_evaluator.RolloutEvaluator"}

            # Override with specified arguments.
            if isinstance(ro_eval, DictConfig):
                ro_eval = omegaconf.OmegaConf.to_object(ro_eval)
            if isinstance(ro_eval, dict):
                ro_eval = {**ro_eval, **eval_kwargs}

            # Try to instantiate the rollout evaluator directly from config. Works if completely specified in
            # config or present as an instance of RolloutEvaluator.
            try:
                ro_eval = Factory(RolloutEvaluator).instantiate(ro_eval)
            # Merge with default values in case of an incomplete RolloutEvaluator config.
            except TypeError:
                default_params = {
                    "eval_env": SequentialVectorEnv(env_factories=[env_fn]),
                    "n_episodes": 8,
                    "model_selection": None,
                    "deterministic": False
                }
                ro_eval = Factory(RolloutEvaluator).instantiate({**default_params, **ro_eval})
            finally:
                rollout_evaluators.append(ro_eval)

        # Evaluate policies.
        stats = [
            self._silence(
                lambda: [ro_eval.evaluate(policy), ro_eval.eval_env.get_stats(LogStatsLevel.EPOCH).last_stats][-1]
            )
            for env_factory, policy, ro_eval in zip(env_factories, policies, rollout_evaluators)
        ]

        return stats[0] if len(stats) == 1 else stats

    @property
    def env_factory(self) -> Union[Callable[[], MazeEnv], List[Callable[[], MazeEnv]]]:
        """
        Returns a newly generated environment with wrappers applied w.r.t. the specified configuration.
        :return: Environment factory function(s). One factory function if RunContext doesn't operate in
            multi-run mode, otherwise a list thereof.
        """
        if len(self._runners[RunMode.TRAINING]) == 0:
            self._runners[RunMode.TRAINING] = self._silence(lambda: self._generate_runners(RunMode.TRAINING))
        runners = self._runners[RunMode.TRAINING]
...
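In this snippet _generate_runners is a private helper: RunContext builds its runners lazily, so the first call to train() (or to the env_factory property) triggers runner generation behind the scenes. Below is a minimal, self-contained sketch of that lazy-generation pattern; all names here (SketchContext, SketchRunner, the config dicts) are illustrative stand-ins, not the actual maze API.

import enum
from typing import Dict, List


class RunMode(enum.Enum):
    TRAINING = "training"
    ROLLOUT = "rollout"


class SketchRunner:
    def __init__(self, config: dict) -> None:
        self.config = config

    def run(self) -> None:
        print(f"running with {self.config}")


class SketchContext:
    """Builds runners lazily: nothing is instantiated until first use."""

    def __init__(self, configs: Dict[RunMode, List[dict]]) -> None:
        self._configs = configs
        self._runners: Dict[RunMode, List[SketchRunner]] = {m: [] for m in RunMode}

    def _generate_runners(self, run_mode: RunMode) -> List[SketchRunner]:
        # One runner per config entry, mirroring the multi-run case above.
        return [SketchRunner(cfg) for cfg in self._configs[run_mode]]

    def train(self) -> None:
        # Lazy initialization, as in RunContext.train().
        if not self._runners[RunMode.TRAINING]:
            self._runners[RunMode.TRAINING] = self._generate_runners(RunMode.TRAINING)
        for runner in self._runners[RunMode.TRAINING]:
            runner.run()


if __name__ == "__main__":
    ctx = SketchContext({RunMode.TRAINING: [{"lr": 1e-3}], RunMode.ROLLOUT: []})
    ctx.train()   # prints: running with {'lr': 0.001}

The design choice is the same as in RunContext: deferring runner construction keeps object creation cheap until a run mode is actually exercised, and the one-runner-per-config loop transparently covers Hydra's --multirun case.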


runner.py

Source: runner.py (GitHub)


...
                sub_runbook_builder = self._runbook_builder.derive(variables=variables)
                transformer.run(
                    sub_runbook_builder, phase=constants.TRANSFORMER_PHASE_EXPANDED
                )

                runners = self._generate_runners(
                    sub_runbook_builder.resolve(), variables
                )
                for runner in runners:
                    yield runner
        else:
            # no combinator, use the root runbook
            transformer.run(
                self._runbook_builder, phase=constants.TRANSFORMER_PHASE_EXPANDED
            )

            for runner in self._generate_runners(
                root_runbook, self._runbook_builder.variables
            ):
                yield runner

    def _generate_runners(
        self, runbook: schema.Runbook, variables: Dict[str, VariableEntry]
    ) -> Iterator[BaseRunner]:
        # group filters by runner type
        case_variables = get_case_variables(variables)
        runner_filters: Dict[str, List[schema.BaseTestCaseFilter]] = {}
        for raw_filter in runbook.testcase_raw:
            # by default run all filtered cases unless 'enable' is specified as false
            filter = schema.load_by_type(schema.BaseTestCaseFilter, raw_filter)
            if filter.enabled:
                raw_filters: List[schema.BaseTestCaseFilter] = runner_filters.get(
                    filter.type, []
                )
                if not raw_filters:
                    runner_filters[filter.type] = raw_filters
...
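In LISA's runner.py, _generate_runners is again private and is driven by runbook execution: test-case filters from the runbook are grouped by their runner type, and (further down in the truncated source) one runner per type is yielded from a generator. The sketch below reproduces just that grouping-and-yielding shape; the names (CaseFilter, SketchRunner, generate_runners) are illustrative, not the real lisa schema.

from dataclasses import dataclass
from typing import Dict, Iterator, List


@dataclass
class CaseFilter:
    type: str           # runner type this filter targets, e.g. "lisa" or "legacy"
    enabled: bool = True


@dataclass
class SketchRunner:
    runner_type: str
    filters: List[CaseFilter]


def generate_runners(raw_filters: List[CaseFilter]) -> Iterator[SketchRunner]:
    # Group enabled filters by runner type, as _generate_runners does above.
    runner_filters: Dict[str, List[CaseFilter]] = {}
    for flt in raw_filters:
        # By default run all filtered cases unless enabled is set to False.
        if flt.enabled:
            runner_filters.setdefault(flt.type, []).append(flt)
    # Yield one runner per type, lazily.
    for runner_type, filters in runner_filters.items():
        yield SketchRunner(runner_type, filters)


if __name__ == "__main__":
    filters = [CaseFilter("lisa"), CaseFilter("legacy"), CaseFilter("lisa", enabled=False)]
    for runner in generate_runners(filters):
        print(runner.runner_type, len(runner.filters))   # lisa 1, then legacy 1

Returning an iterator rather than a list mirrors the LISA code: runners are produced on demand, so the caller can start executing the first runner before later ones are built.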

