How to use update_test_result_message method in lisa

Best Python code snippet using lisa_python

testsuite.py

Source:testsuite.py Github

copy

Full Screen

@property
def name(self) -> str:
    """Short name of this test result, taken from the case metadata."""
    return self.runtime_data.metadata.name

@hookspec
def update_test_result_message(self, message: TestResultMessage) -> None:
    """Plugin hook: lets extensions update/fill a result message before it is sent."""
    ...

def handle_exception(
    self, exception: Exception, log: Logger, phase: str = ""
) -> None:
    """Map a raised exception to a test status and record it.

    SkippedException -> SKIPPED, PassedException -> PASSED (with warning),
    BadEnvironmentStateException/TcpConnectionException -> FAILED and the
    environment is marked Bad; anything else -> FAILED, or ATTEMPTED when
    the case is configured to ignore failures.

    Args:
        exception: the exception raised by the test case.
        log: logger used to report the outcome.
        phase: optional phase label prefixed to the status message.
    """
    self.stacktrace = traceback.format_exc()
    if phase:
        phase = f"{phase} "
    if isinstance(exception, SkippedException):
        log.info(f"case skipped: {exception}")
        log.debug("case skipped", exc_info=exception)
        # case is skipped dynamically
        self.set_status(
            TestStatus.SKIPPED,
            f"{phase}skipped: {exception}",
        )
    elif isinstance(exception, PassedException):
        log.info(f"case passed with warning: {exception}")
        log.debug("case passed with warning", exc_info=exception)
        # case can be passed with a warning.
        self.set_status(
            TestStatus.PASSED,
            f"{phase}warning: {exception}",
        )
    elif isinstance(
        exception, (BadEnvironmentStateException, TcpConnectionException)
    ):
        # single tuple-form isinstance instead of two chained checks
        log.error("case failed with environment in bad state", exc_info=exception)
        self.set_status(TestStatus.FAILED, f"{phase}{exception}")
        assert self.environment
        self.environment.status = EnvironmentStatus.Bad
    else:
        if self.runtime_data.ignore_failure:
            log.info(
                f"case failed and ignored. "
                f"{exception.__class__.__name__}: {exception}"
            )
            self.set_status(TestStatus.ATTEMPTED, f"{phase}{exception}")
        else:
            log.error("case failed", exc_info=exception)
            self.set_status(
                TestStatus.FAILED,
                f"{phase}failed. "
                f"{exception.__class__.__name__}: {exception}",
            )

def set_status(
    self, new_status: TestStatus, message: Union[str, List[str]]
) -> None:
    """Set the result status, accumulate message text, and notify.

    Messages accumulate: a new message is appended after any existing one,
    joined with newlines. The running timer starts when the status first
    transitions to RUNNING. A result message is always sent, even when the
    status is unchanged, so message-only updates still reach notifiers.
    """
    if message:
        if isinstance(message, str):
            message = [message]
        if self.message:
            # keep previously recorded text in front of the new message
            message.insert(0, self.message)
        self.message = "\n".join(message)
    if self.status != new_status:
        self.status = new_status
        if new_status == TestStatus.RUNNING:
            self._timer = create_timer()
    self._send_result_message(self.stacktrace)

def check_environment(
    self, environment: Environment, save_reason: bool = False
) -> bool:
    """Check whether *environment* satisfies this case's requirement.

    Returns True when the environment capability matches. When the case
    declares an OS requirement and the environment is Connected, each
    node's OS is also checked; on a mismatch a SkippedException is raised.
    When save_reason is True, the check result (reasons) is merged into
    self.check_results for later reporting.
    """
    requirement = self.runtime_data.metadata.requirement
    assert requirement.environment
    check_result = requirement.environment.check(environment.capability)
    if (
        check_result.result
        and requirement.os_type
        and environment.status == EnvironmentStatus.Connected
    ):
        for node in environment.nodes.list():
            # the UT has no OS initialized, skip the check
            if not hasattr(node, "os"):
                continue
            # use __mro__ to match any super types.
            # for example, Ubuntu satisfies Linux
            node_os_capability = search_space.SetSpace[Type[OperatingSystem]](
                is_allow_set=True, items=type(node.os).__mro__
            )
            os_result = requirement.os_type.check(node_os_capability)
            # If one of OS mismatches, mark the test case is skipped. It
            # assumes no more env can meet the requirements, instead of
            # checking the rest envs one by one. The reason is this checking
            # is a dynamic checking, and it needs to be checked in each
            # deployed environment. It may cause to deploy a lot of
            # environment for checking. In another hand, the OS should be
            # the same for all environments in the same lisa runner. So it's
            # safe to skip a test case on first os mismatched.
            if not os_result.result:
                raise SkippedException(f"OS type mismatch: {os_result.reasons}")
    if save_reason:
        if self.check_results:
            self.check_results.merge(check_result)
        else:
            self.check_results = check_result
    return check_result.result

def _send_result_message(self, stacktrace: Optional[str] = None) -> None:
    """Build a TestResultMessage from this result and send it to notifiers.

    Copies status/elapsed/id/log-file fields and case metadata, merges
    environment information when available, truncates the message to 2048
    characters, lets plugins update the message via the
    update_test_result_message hook, then notifies.
    """
    if hasattr(self, "_timer"):
        self.elapsed = self._timer.elapsed(False)
    fields = ["status", "elapsed", "id_", "log_file"]
    result_message = TestResultMessage()
    set_filtered_fields(self, result_message, fields=fields)
    metadata_fields = [
        "area",
        "category",
        "tags",
        "description",
        "priority",
        "owner",
    ]
    metadata_information = fields_to_dict(
        src=self.runtime_data.metadata, fields=metadata_fields
    )
    self.information.update(metadata_information)
    # get information of default node, and send to notifier.
    if self.environment:
        self.information.update(self.environment.get_information())
        self.information["environment"] = self.environment.name
    result_message.information.update(self.information)
    # cap message size so notifier back ends are not overloaded
    result_message.message = self.message[0:2048] if self.message else ""
    result_message.name = self.runtime_data.metadata.name
    result_message.full_name = self.runtime_data.metadata.full_name
    result_message.suite_name = self.runtime_data.metadata.suite.name
    result_message.suite_full_name = self.runtime_data.metadata.suite.full_name
    result_message.stacktrace = stacktrace
    # some extensions may need to update or fill information.
    plugin_manager.hook.update_test_result_message(message=result_message)
    notifier.notify(result_message)


@dataclass
class TestCaseRequirement:
    # environment topology/capability the case needs; None means no constraint
    environment: Optional[EnvironmentSpace] = None
    # environment lifecycle state required before the case runs
    environment_status: EnvironmentStatus = EnvironmentStatus.Connected
    # allowed platform names; None means any platform
    platform_type: Optional[search_space.SetSpace[str]] = None
    # allowed OS types; None means any OS
    os_type: Optional[search_space.SetSpace[Type[OperatingSystem]]] = None


# NOTE(review): the source snippet is truncated here. The captured tail was:
#   def _create_test_case_requirement(
#       node: schema.NodeSpace,
#       supported_platform_type: Optional[List[str]] = None,
#       unsupported_platform_type: Optional[List[str]] = None,
#       supported_os: Optional[List[Type[OperatingSystem]]] = None,
#       unsupported_os: Optional[List[Type[OperatingSystem]]] = None,
#       supported_features: Optional[...
# The remainder of the definition is not visible and is not reconstructed.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful