How to use the _test_run_completed method in LISA

Best Python code snippet using lisa_python

onehops_test_runner.py

Source:onehops_test_runner.py Github

copy

Full Screen

"""
SRI Testing Report utility functions.
"""
from typing import Optional, Dict, Tuple, List, Generator
from datetime import datetime
import re
from translator.sri.testing.processor import CMD_DELIMITER, WorkerProcess
from tests.onehop import ONEHOP_TEST_DIRECTORY
from translator.sri.testing.report_db import (
    TestReport,
    TestReportDatabase,
    get_test_report_database
)
import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")

#
# Application-specific parameters
#
DEFAULT_WORKER_TIMEOUT = 120  # 2 minutes for small PyTests?

#
# June/July 2022 - new reporting strategy, based on an exported
# summary, edge details and unit test TRAPI JSON files
#
UNIT_TEST_NAME_PATTERN = re.compile(
    r"^test_onehops.py:(\d+)?:(test_trapi_(?P<component>kp|ara)s|\s)(\[(?P<case>[^]]+)])"
)
TEST_CASE_PATTERN = re.compile(
    r"^(?P<resource_id>[^#]+)(#(?P<edge_num>\d+))?(-(?P<test_id>.+))?$"
)


def build_resource_key(component: str, ara_id: Optional[str], kp_id: str) -> str:
    """
    Returns a key identifier ('path') to a test summary document of a given ARA and/or KP resource.

    :param component: str, Translator component (e.g. 'KP' or 'ARA')
    :param ara_id: str, may be empty (if the resource is directly a KP)
    :param kp_id: str, should not be empty either when directly accessed or indirectly via an ARA
    :return: str, resource-centric document key
    """
    resource_key: str = component
    # The ARA segment is only present for ARA-mediated KP access
    resource_key += f"/{ara_id}" if ara_id else ""
    resource_key += f"/{kp_id}"
    return resource_key


def build_resource_summary_key(component: str, ara_id: Optional[str], kp_id: str) -> str:
    """
    Returns a key identifier ('path') to a resource summary document of a given ARA and/or KP resource.

    :param component: str, Translator component (e.g. 'KP' or 'ARA')
    :param ara_id: Optional[str], ARA identifier (may be None)
    :param kp_id: str, KP identifier
    :return: str, resource-centric document key
    """
    return f"{build_resource_key(component, ara_id, kp_id)}/resource_summary"


def build_edge_details_key(component: str, ara_id: Optional[str], kp_id: str, edge_num: str) -> str:
    """
    Returns a key identifier ('path') to an edge details related document.

    :param component: str, Translator component (e.g. 'KP' or 'ARA')
    :param ara_id: Optional[str], ARA identifier (may be None)
    :param kp_id: str, KP identifier
    :param edge_num: str, edge number within the KP test file
    :return: str, edge-centric document key
    """
    return f"{build_resource_key(component, ara_id, kp_id)}/{kp_id}-{edge_num}"


def parse_unit_test_name(unit_test_key: str) -> Tuple[str, str, str, int, str, str]:
    """
    Reformat (test run key) source identifier into a well-behaved test file name.

    :param unit_test_key: original full unit test label
    :return: Tuple[component, ara_id, kp_id, int(edge_num), test_id, edge_details_file_path]
    :raises RuntimeError: if the unit test key does not match the expected naming pattern
    """
    unit_test_name = unit_test_key.split('/')[-1]
    psf = UNIT_TEST_NAME_PATTERN.match(unit_test_name)
    if psf:
        component = psf["component"]
        if component:
            component = component.upper()
            case = psf["case"]
            if case:
                tci = TEST_CASE_PATTERN.match(case)
                if tci:
                    resource_id = tci["resource_id"]
                    if resource_id:
                        # An ARA-mediated KP is encoded as "<ara_id>|<kp_id>";
                        # a directly accessed KP has no '|' separator.
                        rpart = resource_id.split("|")
                        if len(rpart) > 1:
                            ara_id = rpart[0]
                            kp_id = rpart[1]
                        else:
                            ara_id = None
                            kp_id = rpart[0]
                        edge_num = tci["edge_num"]
                        if edge_num:
                            # Missing test_id implies the edge 'input' data itself
                            test_id = tci["test_id"] if tci["test_id"] else "input"
                            return (
                                component,
                                ara_id,
                                kp_id,
                                int(edge_num),
                                test_id,
                                build_edge_details_key(component, ara_id, kp_id, edge_num)
                            )
    raise RuntimeError(f"parse_unit_test_name() '{unit_test_key}' has unknown format?")


class OneHopTestHarness:
    """
    Manager for SRI Testing 'one hop' test runs: spawns pytest worker processes,
    tracks their percentage completion, and mediates access to persisted test
    reports in the TestReportDatabase.
    """

    # Caching of processes, indexed by test_run_id (timestamp identifier as string)
    _test_run_id_2_worker_process: Dict[str, Dict] = dict()

    # Lazily initialized shared TestReportDatabase handle
    _test_report_database: Optional[TestReportDatabase] = None

    @classmethod
    def test_report_database(cls) -> TestReportDatabase:
        """Lazily instantiate and return the shared TestReportDatabase."""
        if cls._test_report_database is None:
            cls._test_report_database = get_test_report_database()
        return cls._test_report_database

    @classmethod
    def initialize(cls):
        """
        Initialize the OneHopTestHarness environment,
        i.e. to recognize persisted test runs (in the TestReportDatabase).

        Persisted runs are registered as already completed (100%), with no
        live worker process attached.
        """
        logger.debug("Initializing the OneHopTestHarness environment")
        for test_run_id in cls.get_completed_test_runs():
            logger.debug(f"Found persisted test run {test_run_id} in TestReportDatabase")
            cls._test_run_id_2_worker_process[test_run_id] = {
                "command_line": None,
                "worker_process": None,
                "timeout": DEFAULT_WORKER_TIMEOUT,
                "percentage_completion": 100,
                "test_run_completed": True
            }

    @staticmethod
    def _generate_test_run_id() -> str:
        # NOTE(review): '%F' is a GNU strftime shorthand for '%Y-%m-%d'; it may
        # not be supported on all platforms (e.g. Windows) — confirm portability.
        return datetime.now().strftime("%F_%H-%M-%S")

    def __init__(self, test_run_id: Optional[str] = None):
        """
        OneHopTestHarness constructor.

        :param test_run_id: Optional[str], known timestamp test run identifier; internally created if 'None'
        """
        self._command_line: Optional[str] = None
        self._process: Optional[WorkerProcess] = None
        self._timeout: Optional[int] = DEFAULT_WORKER_TIMEOUT
        self._test_run_completed: bool = False
        if test_run_id is not None:
            # should be an existing test run?
            self._test_run_id = test_run_id
            self._reload_run_parameters()
        else:
            # new (or 'local') test run? no run parameters to reload?
            self._test_run_id = self._generate_test_run_id()
            # Placeholder entry; populated with real run parameters by run()
            self._test_run_id_2_worker_process[self._test_run_id] = {}

        # Retrieve the associated test run report object
        self._test_report: Optional[TestReport] = \
            self.test_report_database().get_test_report(identifier=self._test_run_id)

        # TODO: need a sensible path/db_key for the log file
        # self._log_file_path = self.get_absolute_file_path(document_key="test.log", create_path=True)
        self._log_file_path: Optional[str] = None

    def get_test_run_id(self) -> Optional[str]:
        """Return the timestamp identifier of this test run."""
        return self._test_run_id

    def get_test_report(self) -> Optional[TestReport]:
        """Return the TestReport object associated with this test run."""
        return self._test_report

    def run(
            self,
            trapi_version: Optional[str] = None,
            biolink_version: Optional[str] = None,
            triple_source: Optional[str] = None,
            ara_source: Optional[str] = None,
            one: bool = False,
            log: Optional[str] = None,
            timeout: Optional[int] = DEFAULT_WORKER_TIMEOUT
    ):
        """
        Run the SRI Testing test harness as a worker process.

        :param trapi_version: Optional[str], TRAPI version assumed for test run (default: None)
        :param biolink_version: Optional[str], Biolink Model version used in test run (default: None)
        :param triple_source: Optional[str], 'REGISTRY', directory or file from which to retrieve triples
                              (Default: 'REGISTRY', which triggers the use of metadata, in KP entries
                              from the Translator SmartAPI Registry, to configure the tests).
        :param ara_source: Optional[str], 'REGISTRY', directory or file from which to retrieve ARA Config.
                           (Default: 'REGISTRY', which triggers the use of metadata, in ARA entries
                           from the Translator SmartAPI Registry, to configure the tests).
        :param one: bool, Only use first edge from each KP file (default: False if omitted).
        :param log: Optional[str], desired Python logger level label (default: None, implying default logger)
        :param timeout: Optional[int], worker process timeout in seconds (defaults to about 120 seconds)
        :return: None
        """
        # possible override of timeout here?
        self._timeout = timeout if timeout else self._timeout

        self._command_line = f"cd {ONEHOP_TEST_DIRECTORY} {CMD_DELIMITER} " + \
                             "pytest --tb=line -vv"
        self._command_line += f" --log-cli-level={log}" if log else ""
        self._command_line += " test_onehops.py"
        self._command_line += f" --test_run_id={self._test_run_id}"
        self._command_line += f" --TRAPI_Version={trapi_version}" if trapi_version else ""
        self._command_line += f" --Biolink_Version={biolink_version}" if biolink_version else ""
        self._command_line += f" --triple_source={triple_source}" if triple_source else ""
        self._command_line += f" --ARA_source={ara_source}" if ara_source else ""
        self._command_line += " --one" if one else ""

        logger.debug(f"OneHopTestHarness.run() command line: {self._command_line}")

        self._process = WorkerProcess(identifier=self._test_run_id, timeout=self._timeout)
        self._process.run_command(self._command_line)

        # Cache run parameters for later access as necessary
        # TODO: what about the TestReportDatabase(?)
        self._test_run_id_2_worker_process[self._test_run_id] = {
            "command_line": self._command_line,
            "worker_process": self._process,
            "timeout": self._timeout,
            "percentage_completion": 0,  # Percentage Completion needs to be updated later?
            "test_run_completed": False
        }

    def get_worker(self) -> Optional[WorkerProcess]:
        """Return the WorkerProcess of this test run (None if not running)."""
        return self._process

    def _set_percentage_completion(self, value: int):
        if self._test_run_id in self._test_run_id_2_worker_process:
            self._test_run_id_2_worker_process[self._test_run_id]["percentage_completion"] = value
        else:
            raise RuntimeError(
                f"_set_percentage_completion(): '{str(self._test_run_id)}' Worker Process is unknown!"
            )

    def _get_percentage_completion(self) -> int:
        if self._test_run_id in self._test_run_id_2_worker_process:
            # Use .get(): a freshly constructed (not yet run) harness caches an
            # *empty* dict for its run id, so direct indexing would raise KeyError
            return self._test_run_id_2_worker_process[self._test_run_id].get("percentage_completion", -1)
        else:
            return -1  # signal unknown test run process?

    def _reload_run_parameters(self):
        # TODO: do we also need to reconnect to the TestReportDatabase here?
        if self._test_run_id in self._test_run_id_2_worker_process:
            run_parameters: Dict = self._test_run_id_2_worker_process[self._test_run_id]
            self._command_line = run_parameters["command_line"]
            self._process = run_parameters["worker_process"]
            self._timeout = run_parameters["timeout"]
            self._percentage_completion = run_parameters["percentage_completion"]
            self._test_run_completed = run_parameters["test_run_completed"]
        else:
            logger.warning(
                f"Test run '{self._test_run_id}' is not associated with a Worker Process. " +
                f"May be invalid or an historic archive? Client needs to check for the latter?")
            self._command_line = None
            self._process = None
            self._timeout = DEFAULT_WORKER_TIMEOUT
            self._percentage_completion = -1

    def test_run_complete(self) -> bool:
        """
        Poll (once) for test run completion.

        :return: bool, True if the worker process has completed or is no longer running
        """
        if not self._test_run_completed:
            # If there is an active WorkerProcess...
            if self._process:
                # ... then poll the Queue for task completion
                status: str = self._process.status()
                if status.startswith(WorkerProcess.COMPLETED) or \
                        status.startswith(WorkerProcess.NOT_RUNNING):
                    self._test_run_completed = True
                    if status.startswith(WorkerProcess.COMPLETED):
                        logger.debug(status)
                    # Drop the finished process reference
                    self._process = None
        return self._test_run_completed

    def get_status(self) -> int:
        """
        If available, returns the percentage completion of the currently active OneHopTestHarness run.

        :return: int, 0..100 indicating the percentage completion of the test run; -1 if unknown test run ID
        """
        completed_test_runs: List[str] = self.get_completed_test_runs()
        if self._test_run_id in completed_test_runs:
            # Option 1: detection of a completed_test_run
            self._set_percentage_completion(100)
        elif self._process is not None and 0 <= self._get_percentage_completion() < 95:
            # Guard on self._process: it may already have been released by
            # test_run_complete() while completion is still below 95%
            for percentage_complete in self._process.get_output(timeout=1):
                logger.debug(f"Pytest % completion: {percentage_complete}")
                # We deliberately hold back declaring 100% completion to allow
                # the system to truly finish processing and return the full test report
                self._set_percentage_completion(int(float(percentage_complete) * 0.95))
        elif self.test_run_complete():
            # Option 2, fail safe: sets completion at 100% if the task is not (or no longer) running?
            self._set_percentage_completion(100)
        return self._get_percentage_completion()

    def delete(self) -> str:
        """
        Delete this test run: terminate any active worker process, remove the run
        from the in-memory cache and delete its TestReport from the database.

        :return: str, human-readable outcome message
        """
        try:
            if not (self.test_run_complete() and self._test_report):
                # test run still in progress...
                if self._process:
                    # this is a blocking process termination but leaves
                    # an incomplete TestReport in the TestReportDatabase
                    self._process.terminate()
                    self._process = None
            # Remove the process from the OneHopTestHarness cache
            if self._test_run_id in self._test_run_id_2_worker_process:
                self._test_run_id_2_worker_process.pop(self._test_run_id)
            success = self._test_report.delete(ignore_errors=True)
        except Exception as exc:
            # Not sure what other conditions would trigger this, if any
            logger.error(f"delete() exception: '{str(exc)}'")
            success = False
        outcome: str = f"Test Run '{self._test_run_id}': "
        if success:
            self._test_report = None
            outcome += "successfully deleted!"
        else:
            outcome += "test run deletion may have been problematic. Check the server logs!"
        return outcome

    def save_json_document(self, document_type: str, document: Dict, document_key: str, is_big: bool = False):
        """
        Saves an indexed document either to a test report database or the filing system.

        :param document_type: str, category label of document type being saved (for error reporting)
        :param document: Dict, Python object to persist as a JSON document.
        :param document_key: str, indexing path for the document being saved.
        :param is_big: bool, if True, flags that the JSON file is expected to require special handling due to its size.
        """
        self.get_test_report().save_json_document(
            document_type=document_type,
            document=document,
            document_key=document_key,
            is_big=is_big
        )

    @classmethod
    def get_completed_test_runs(cls) -> List[str]:
        """
        :return: list of test run identifiers of completed test runs
        """
        return cls.test_report_database().get_available_reports()

    def get_index(self) -> Optional[Dict]:
        """
        If available, returns a test result index - KP and ARA tags - for the most recent OneHopTestHarness run.

        :return: Optional[Dict], JSON document KP/ARA index of unit test results. 'None' if not (yet) available.
        """
        # TODO: can some part of this operation be cached, maybe by pushing
        #       the index access down one more level, into the TestReport?
        summary: Optional[Dict] = self.get_test_report().retrieve_document(
            document_type="Summary", document_key="test_run_summary"
        )
        # Sanity check for existence of the summary...
        if not summary:
            return None
        # We extract the 'index' from the available 'test_run_summary' document
        index: Dict = dict()
        if "KP" in summary and summary["KP"]:
            index["KP"] = [str(key) for key in summary["KP"].keys()]
        if "ARA" in summary and summary["ARA"]:
            index["ARA"] = dict()
            for ara_id, ara in summary["ARA"].items():
                if "kps" in ara and ara["kps"]:
                    kps: Dict = ara["kps"]
                    index["ARA"][ara_id] = [str(key) for key in kps.keys()]
        return index

    def get_summary(self) -> Optional[Dict]:
        """
        If available, returns a test result summary for the most recent OneHopTestHarness run.

        :return: Optional[Dict], JSON document summary of unit test results. 'None' if not (yet) available.
        """
        summary: Optional[Dict] = self.get_test_report().retrieve_document(
            document_type="Summary", document_key="test_run_summary"
        )
        return summary

    def get_resource_summary(
            self,
            component: str,
            kp_id: str,
            ara_id: Optional[str] = None
    ) -> Optional[Dict]:
        """
        Returns test result summary across all edges for given resource component.

        :param component: str, Translator component being tested: 'ARA' or 'KP'
        :param kp_id: str, identifier of a KP resource being accessed.
        :param ara_id: Optional[str], identifier of the ARA resource being accessed. May be missing or None
        :return: Optional[Dict], JSON structured document of test details for a specified test edge of a
                 KP or ARA resource, or 'None' if the details are not (yet) available.
        """
        document_key: str = build_resource_summary_key(component, ara_id, kp_id)
        resource_summary: Optional[Dict] = self.get_test_report().retrieve_document(
            document_type="Resource Summary", document_key=document_key
        )
        return resource_summary

    def get_details(
            self,
            component: str,
            edge_num: str,
            kp_id: str,
            ara_id: Optional[str] = None
    ) -> Optional[Dict]:
        """
        Returns test result details for given resource component and edge identities.

        :param component: str, Translator component being tested: 'ARA' or 'KP'
        :param edge_num: str, target input 'edge_num' edge number, as indexed as an edge of the JSON test run summary.
        :param kp_id: str, identifier of a KP resource being accessed.
        :param ara_id: Optional[str], identifier of the ARA resource being accessed. May be missing or None
        :return: Optional[Dict], JSON structured document of test details for a specified test edge of a
                 KP or ARA resource, or 'None' if the details are not (yet) available.
        """
        document_key: str = build_edge_details_key(component, ara_id, kp_id, edge_num)
        details: Optional[Dict] = self.get_test_report().retrieve_document(
            document_type="Details", document_key=document_key
        )
        return details

    def get_streamed_response_file(
            self,
            component: str,
            edge_num: str,
            test_id: str,
            kp_id: str,
            ara_id: Optional[str] = None
    ) -> Generator:
        """
        Returns the TRAPI Response file path for given resource component, edge and unit test identities.

        :param component: str, Translator component being tested: 'ARA' or 'KP'
        :param edge_num: str, target input 'edge_num' edge number, as indexed as an edge of the JSON test run summary.
        :param test_id: str, target unit test identifier, one of the values noted in the
                        edge leaf nodes of the JSON test run summary (e.g. 'by_subject', etc.).
        :param kp_id: str, identifier of a KP resource being accessed.
        :param ara_id: Optional[str], identifier of the ARA resource being accessed. May be missing or None
        :return: str, TRAPI Response text data file path (generated, but not tested here for file existence)
        """
        document_key: str = build_edge_details_key(component, ara_id, kp_id, edge_num)
        return self.get_test_report().stream_document(
            document_type="Details", document_key=f"{document_key}-{test_id}"
        )

Full Screen

Full Screen

junit.py

Source:junit.py Github

copy

Full Screen

...72 elif (73 message.status == TestRunStatus.FAILED74 or message.status == TestRunStatus.SUCCESS75 ):76 self._test_run_completed(message)77 # Handle a test case message.78 def _received_test_result(self, message: TestResultMessage) -> None:79 if message.status == TestStatus.RUNNING:80 self._test_case_running(message)81 elif message.is_completed:82 self._test_case_completed(message)83 # Test run started message.84 def _test_run_started(self, message: TestRunMessage) -> None:85 self._testsuites.attrib["name"] = message.runbook_name86 # Test run completed message.87 def _test_run_completed(self, message: TestRunMessage) -> None:88 total_tests = 089 total_failures = 090 for testsuite_info in self._testsuites_info.values():91 testsuite_info.xml.attrib["tests"] = str(testsuite_info.test_count)92 testsuite_info.xml.attrib["failures"] = str(testsuite_info.failed_count)93 testsuite_info.xml.attrib["errors"] = "0"94 total_tests += testsuite_info.test_count95 total_failures += testsuite_info.failed_count96 self._testsuites.attrib["time"] = self._get_elapsed_str(message)97 self._testsuites.attrib["tests"] = str(total_tests)98 self._testsuites.attrib["failures"] = str(total_failures)99 self._testsuites.attrib["errors"] = "0"100 def _test_case_running(self, message: TestResultMessage) -> None:101 if message.suite_full_name not in self._testsuites_info:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful