How to use the start_test_item method in Lemoncheesecake

The Python snippets below show how the ReportPortal client's start_test_item method is driven by different test-framework integrations, including lemoncheesecake's ReportPortal backend.
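Before reading the snippets, it helps to see the bare call pattern they all build on. The following is a minimal sketch, assuming the reportportal-client 5.x API, where item IDs are returned by start_test_item and passed back explicitly (older 3.x/4.x clients keep an internal item stack instead, as the last two snippets do). The endpoint, project, and token values and the timestamp helper are placeholders for illustration, not values from the snippets.

# Minimal sketch of the start_test_item / finish_test_item lifecycle.
# Assumes reportportal-client 5.x; endpoint/project/token are placeholders.
from time import time

from reportportal_client import ReportPortalService


def timestamp():
    # ReportPortal expects epoch timestamps in milliseconds, as strings.
    return str(int(time() * 1000))


service = ReportPortalService(
    endpoint="http://localhost:8080",  # placeholder RP instance
    project="my_project",  # placeholder project name
    token="00000000-0000-0000-0000-000000000000",  # placeholder API token
)
service.start_launch(name="demo_launch", start_time=timestamp())

# Open a SUITE item, then nest a STEP item under it via parent_item_id.
suite_id = service.start_test_item(
    name="demo_suite", start_time=timestamp(), item_type="SUITE"
)
step_id = service.start_test_item(
    name="demo_step",
    start_time=timestamp(),
    item_type="STEP",
    parent_item_id=suite_id,
)

# Finish items in reverse order of creation, then close the launch.
service.finish_test_item(item_id=step_id, end_time=timestamp(), status="PASSED")
service.finish_test_item(item_id=suite_id, end_time=timestamp(), status="PASSED")
service.finish_launch(end_time=timestamp())
service.terminate()

The first two snippets below follow this 5.x style; the last two use the older stack-based client, where finish_test_item simply closes the most recently started item.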

test_rp_agent.py

Source: test_rp_agent.py (GitHub)


# Unit tests for the behave-reportportal agent; ReportPortalService is mocked
# with mock.create_autospec, so no real ReportPortal instance is needed.
import os

import mock
import pytest
from delayed_assert import assert_expectations, expect
from prettytable import PrettyTable
from reportportal_client import ReportPortalService

from behave_reportportal.behave_agent import BehaveAgent, create_rp_service
from behave_reportportal.config import Config
from behave_reportportal.utils import Singleton


@pytest.fixture()
def config():
    # Shared Config fixture used by most tests.
    return Config(
        endpoint="endpoint",
        token="token",
        project="project",
        launch_id=None,
        launch_name="launch_name",
        launch_description="launch_description",
    )


@pytest.fixture(autouse=True)
def clean_instances():
    # Reset the Singleton registry between tests.
    yield
    Singleton._instances = {}


@pytest.mark.parametrize(
    "status,expected",
    [
        ("passed", "PASSED"),
        ("skipped", "SKIPPED"),
        ("failed", "FAILED"),
        ("xyz", "PASSED"),
    ],
)
def test_convert_to_rp_status(status, expected):
    actual = BehaveAgent.convert_to_rp_status(status)
    assert (
        actual == expected
    ), "Incorrect status:\nActual: {}\nExpected: {}".format(actual, expected)


def test_attributes(config):
    mock_item = mock.Mock()
    mock_item.tags = None
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    expect(ba._attributes(mock_item) == [], "Attributes is not empty")
    mock_item.tags = ["a", "b", "attribute(k1:v1,v2)"]
    exp = [
        {"value": "a"},
        {"value": "b"},
        {"key": "k1", "value": "v1"},
        {"value": "v2"},
    ]
    act = ba._attributes(mock_item)
    expect(
        act == exp,
        "Attributes are incorrect:\nActual: {}\nExpected: {}".format(act, exp),
    )
    assert_expectations()


@pytest.mark.parametrize(
    "tags,exp_attrs",
    [
        (["attribute( k1: v1, v2,v3 )"], ["k1: v1", "v2", "v3"]),
        (["attribute(k1:v1,k2:v2)"], ["k1:v1", "k2:v2"]),
        (["attribute(v1,v2)"], ["v1", "v2"]),
        (["attribute(v1)"], ["v1"]),
        (["attribute(v1)", "attribute(k2:v2,v3)"], ["v1", "k2:v2", "v3"]),
        (["attr(v1)"], []),
        (["attribute"], []),
        (["attribute)"], []),
        (["attribute("], []),
        (["attribute()"], []),
        (["attribute(some_text"], []),
        (["attributesome_text)"], []),
    ],
)
def test_get_attributes_from_tags(tags, exp_attrs):
    act_attrs = BehaveAgent._get_attributes_from_tags(tags)
    assert act_attrs == exp_attrs


@pytest.mark.parametrize(
    "tags,exp_test_case_id",
    [
        (["test_case_id(123)"], "123"),
        (["test_case_id(1,2,3)"], "1,2,3"),
        (["test_case_id()"], None),
        (["test_case_id(1)", "test_case_id(2)"], "1"),
        (["some_tag"], None),
        (["some_tag", "test_case_id(2)"], "2"),
        (["test_case_id"], None),
        (["test_case_id("], None),
        (["test_case_id)"], None),
    ],
)
def test_case_id(tags, exp_test_case_id):
    mock_scenario = mock.Mock()
    mock_scenario.tags = tags
    act_test_case_id = BehaveAgent._test_case_id(mock_scenario)
    assert act_test_case_id == exp_test_case_id


def test_code_ref():
    mock_item = mock.Mock()
    mock_item.location = None
    expect(BehaveAgent._code_ref(mock_item) is None, "code_ref is not None")
    mock_location = mock.Mock()
    mock_location.filename = "filename"
    mock_location.line = 24
    mock_item.location = mock_location
    expect(
        BehaveAgent._code_ref(mock_item) == "filename:24",
        "code_ref is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._code_ref(mock_item), "filename:24"
        ),
    )
    assert_expectations()


def test_get_parameters():
    mock_item = mock.Mock()
    mock_item._row = None
    expect(
        BehaveAgent._get_parameters(mock_item) is None,
        "parameters is not None",
    )
    mock_row = mock.Mock()
    mock_row.headings = ["A", "B"]
    mock_row.cells = [1, 2]
    mock_item._row = mock_row
    expect(
        BehaveAgent._get_parameters(mock_item) == {"A": 1, "B": 2},
        "parameters are incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._get_parameters(mock_item), {"A": 1, "B": 2}
        ),
    )
    assert_expectations()


def test_create_rp_service_disabled_rp():
    assert (
        create_rp_service(Config()) is None
    ), "Service is not None for disabled integration with RP in config"


def test_create_rp_service_enabled_rp(config):
    rp = create_rp_service(config)
    assert isinstance(
        rp, ReportPortalService
    ), "Invalid initialization of RP ReportPortalService"


@mock.patch("behave_reportportal.behave_agent.ReportPortalService")
def test_create_rp_service_init(mock_rps):
    create_rp_service(Config(endpoint="A", token="B", project="C"))
    mock_rps.assert_has_calls(
        [
            mock.call(
                endpoint="A",
                launch_id=None,
                token="B",
                project="C",
                is_skipped_an_issue=False,
                retries=None,
            )
        ],
        any_order=True,
    )


def test_init_invalid_config():
    ba = BehaveAgent(Config())
    assert ba._rp is None, "Incorrect initialization of agent"


def test_init_valid_config(config):
    ba = BehaveAgent(config, mock.Mock())
    expect(ba._cfg is not None, "Config is None")
    expect(ba._rp is not None, "Incorrect initialization of agent")
    assert_expectations()


def test_item_description():
    mock_item = mock.Mock()
    mock_item.description = None
    expect(
        BehaveAgent._item_description(mock_item) is None,
        "Description is not None",
    )
    mock_item.description = ["a", "b"]
    expect(
        BehaveAgent._item_description(mock_item) == "Description:\na\nb",
        "Description is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._item_description(mock_item), "Description:\na\nb"
        ),
    )
    assert_expectations()


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_launch(mock_timestamp, config):
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.launch_id = None
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba.start_launch(mock_context, some_key="some_value")
    mock_rps.start_launch.assert_called_once_with(
        name=config.launch_name,
        start_time=123,
        attributes=ba._get_launch_attributes(),
        description=config.launch_description,
        some_key="some_value",
        rerun=False,
        rerunOf=None,
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_launch_with_rerun(mock_timestamp):
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.launch_id = None
    mock_context = mock.Mock()
    cfg = Config(
        endpoint="endpoint",
        token="token",
        project="project",
        launch_name="launch_name",
        launch_description="launch_description",
        rerun=True,
        rerun_of="launch_id",
    )
    ba = BehaveAgent(cfg, mock_rps)
    ba.start_launch(mock_context, some_key="some_value")
    mock_rps.start_launch.assert_called_once_with(
        name=cfg.launch_name,
        start_time=123,
        attributes=ba._get_launch_attributes(),
        description=cfg.launch_description,
        some_key="some_value",
        rerun=cfg.rerun,
        rerunOf=cfg.rerun_of,
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_launch(mock_timestamp, config):
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba.finish_launch(mock_context, some_key="some_value")
    mock_rps.finish_launch.assert_called_once_with(
        end_time=123, some_key="some_value"
    )
    mock_rps.terminate.assert_called_once()


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_skipped_feature(mock_timestamp, config):
    mock_feature = mock.Mock()
    mock_feature.tags = ["some_tag", "skip"]
    mock_timestamp.return_value = 123
    verify_start_feature(mock_feature, config)
    mock_feature.skip.assert_called_once_with("Marked with @skip")


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_feature(mock_timestamp, config):
    mock_feature = mock.Mock()
    mock_feature.tags = None
    mock_timestamp.return_value = 123
    verify_start_feature(mock_feature, config)


def verify_start_feature(mock_feature, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.start_test_item.return_value = "feature_id"
    mock_context = mock.Mock()
    mock_feature.name = "feature_name"
    mock_feature.description = ["A", "B"]
    ba = BehaveAgent(config, mock_rps)
    ba.start_feature(mock_context, mock_feature, some_key="some_value")
    mock_rps.start_test_item.assert_called_once_with(
        name="feature_name",
        start_time=123,
        item_type="SUITE",
        description=BehaveAgent._item_description(mock_feature),
        code_ref=BehaveAgent._code_ref(mock_feature),
        attributes=ba._attributes(mock_feature),
        some_key="some_value",
    )
    assert (
        ba._feature_id == "feature_id"
    ), "Invalid feature_id:\nActual: {}\nExpected: {}\n".format(
        ba._feature_id, "feature_id"
    )


@pytest.mark.parametrize(
    "tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")]
)
@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_feature(mock_timestamp, config, tags, expected_status):
    mock_feature = mock.Mock()
    mock_feature.tags = tags
    mock_feature.status.name = "passed"
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    mock_context._stack = []
    ba = BehaveAgent(config, mock_rps)
    ba._feature_id = "feature_id"
    ba.finish_feature(mock_context, mock_feature, some_key="some_value")
    mock_rps.finish_test_item.assert_called_once_with(
        item_id="feature_id",
        end_time=123,
        status=expected_status,
        some_key="some_value",
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_skipped_scenario(mock_timestamp, config):
    mock_scenario = mock.Mock()
    mock_scenario.tags = ["some_tag", "skip"]
    mock_timestamp.return_value = 123
    verify_start_scenario(mock_scenario, config)
    mock_scenario.skip.assert_called_once_with("Marked with @skip")


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_scenario(mock_timestamp, config):
    mock_scenario = mock.Mock()
    mock_scenario.tags = None
    mock_timestamp.return_value = 123
    verify_start_scenario(mock_scenario, config)


def verify_start_scenario(mock_scenario, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.start_test_item.return_value = "scenario_id"
    mock_context = mock.Mock()
    mock_scenario.name = "scenario_name"
    mock_scenario._row = None
    mock_scenario.description = ["A", "B"]
    ba = BehaveAgent(config, mock_rps)
    ba._feature_id = "feature_id"
    ba.start_scenario(mock_context, mock_scenario, some_key="some_value")
    mock_rps.start_test_item.assert_called_once_with(
        name="scenario_name",
        start_time=123,
        item_type="STEP",
        parent_item_id="feature_id",
        description=BehaveAgent._item_description(mock_scenario),
        code_ref=BehaveAgent._code_ref(mock_scenario),
        parameters=BehaveAgent._get_parameters(mock_scenario),
        attributes=ba._attributes(mock_scenario),
        test_case_id=ba._test_case_id(mock_scenario),
        some_key="some_value",
    )
    assert (
        ba._scenario_id == "scenario_id"
    ), "Invalid scenario_id:\nActual: {}\nExpected: {}\n".format(
        ba._scenario_id, "scenario_id"
    )


@pytest.mark.parametrize(
    "tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")]
)
@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_scenario(mock_timestamp, config, tags, expected_status):
    mock_scenario = mock.Mock()
    mock_scenario.tags = tags
    mock_scenario.status.name = "passed"
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    mock_context._stack = []
    ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
    ba.finish_scenario(mock_context, mock_scenario, some_key="some_value")
    mock_rps.finish_test_item.assert_called_once_with(
        item_id="scenario_id",
        end_time=123,
        status=expected_status,
        some_key="some_value",
    )


@mock.patch.object(BehaveAgent, "_log_scenario_exception")
def test_finish_failed_scenario(mock_log, config):
    mock_scenario = mock.Mock()
    mock_scenario.tags = []
    mock_scenario.status.name = "failed"
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    mock_context._stack = []
    ba = BehaveAgent(config, mock_rps)
    ba.finish_scenario(mock_context, mock_scenario)
    mock_log.assert_called_once_with(mock_scenario)


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_start_step_step_based(mock_timestamp, config):
    config.step_based = True
    mock_step = mock.Mock()
    mock_step.keyword = "keyword"
    mock_step.name = "name"
    mock_step.text = None
    mock_step.table = None
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.start_test_item.return_value = "step_id"
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
    ba.start_step(mock_context, mock_step, some_key="some_value")
    mock_rps.start_test_item.assert_called_once_with(
        name="[keyword]: name",
        start_time=123,
        item_type="STEP",
        parent_item_id="scenario_id",
        description="",
        code_ref=BehaveAgent._code_ref(mock_step),
        some_key="some_value",
    )
    assert ba._step_id == "step_id"


def test_start_step_scenario_based(config):
    config.step_based = False
    mock_step = mock.Mock()
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba.start_step(mock_context, mock_step, some_key="some_value")
    mock_rps.start_test_item.assert_not_called()


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_passed_step_step_based(mock_timestamp, config):
    config.step_based = True
    mock_step = mock.Mock()
    mock_step.status.name = "passed"
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba._step_id = "step_id"
    ba.finish_step(mock_context, mock_step, some_key="some_value")
    mock_rps.finish_test_item.assert_called_once_with(
        item_id="step_id", end_time=123, status="PASSED", some_key="some_value"
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_failed_step_step_based(mock_timestamp, config):
    config.step_based = True
    mock_step = mock.Mock()
    mock_step.keyword = "keyword"
    mock_step.name = "name"
    mock_step.status.name = "failed"
    mock_step.exception.args = ["Exception message"]
    mock_step.error_message = "Error message"
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba._step_id = "step_id"
    ba._scenario_id = "step_id"
    ba.finish_step(mock_context, mock_step, some_key="some_value")
    mock_rps.finish_test_item.assert_called_once_with(
        item_id="step_id", end_time=123, status="FAILED", some_key="some_value"
    )
    mock_rps.log.assert_has_calls(
        [
            mock.call(
                item_id="step_id",
                time=123,
                level="ERROR",
                message="Step [keyword]: name was finished with exception.\n"
                "Exception message\nError message",
            )
        ]
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_finish_failed_step_scenario_based(mock_timestamp, config):
    config.step_based = False
    mock_step = mock.Mock()
    mock_step.keyword = "keyword"
    mock_step.name = "name"
    mock_step.status.name = "failed"
    mock_step.text = None
    mock_step.table = None
    mock_step.exception.args = ["Exception message"]
    mock_step.error_message = "Error message"
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
    ba.finish_step(mock_context, mock_step)
    calls = [
        mock.call(
            item_id="scenario_id",
            time=123,
            level="ERROR",
            message="Step [keyword]: name was finished with exception.\n"
            "Exception message\nError message",
        ),
        mock.call(
            item_id="scenario_id",
            time=123,
            level="INFO",
            message="[keyword]: name. ",
        ),
    ]
    mock_rps.log.assert_has_calls(calls, any_order=True)


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_exception_without_message(mock_timestamp, config):
    mock_timestamp.return_value = 123
    mock_step = mock.Mock()
    mock_step.exception = None
    mock_step.error_message = None
    mock_step.keyword = "keyword"
    mock_step.name = "name"
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    ba._log_step_exception(mock_step, "step_id")
    mock_rps.log.assert_called_once_with(
        item_id="step_id",
        time=123,
        level="ERROR",
        message="Step [keyword]: name was finished with exception.",
    )


@mock.patch.dict(os.environ, {"AGENT_NO_ANALYTICS": "1"})
@mock.patch("behave_reportportal.behave_agent.send_event")
def test_skip_analytics(mock_send_event, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.launch_id = None
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba.start_launch(mock_context)
    mock_send_event.assert_not_called()


@mock.patch("behave_reportportal.behave_agent.send_event")
def test_analytics(mock_send_event, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_rps.launch_id = None
    mock_context = mock.Mock()
    ba = BehaveAgent(config, mock_rps)
    ba.start_launch(mock_context)
    mock_send_event.assert_called_once_with(ba.agent_name, ba.agent_version)


def test_rp_is_none():
    ba = BehaveAgent(Config(), None)
    ba.start_step(mock.Mock(), mock.Mock())
    assert ba._step_id is None


@mock.patch.object(BehaveAgent, "_log")
def test_post_log(mock_log, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    ba._log_item_id = "log_item_id"
    ba.post_log("message", file_to_attach="filepath")
    mock_log.assert_called_once_with(
        "message", "INFO", item_id="log_item_id", file_to_attach="filepath"
    )


@mock.patch.object(BehaveAgent, "_log")
def test_post_launch_log(mock_log, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    ba._log_item_id = "log_item_id"
    ba.post_launch_log("message", file_to_attach="filepath")
    mock_log.assert_called_once_with(
        "message", "INFO", file_to_attach="filepath"
    )


@mock.patch("behave_reportportal.behave_agent.mimetypes")
@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_post__log(mock_timestamp, mock_mime, config):
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    mock_mime.guess_type.return_value = ("mime_type", None)
    with mock.patch("builtins.open", mock.mock_open(read_data="data")):
        ba._log(
            "message", "ERROR", file_to_attach="filepath", item_id="item_id"
        )
    mock_rps.log.assert_called_once_with(
        time=123,
        message="message",
        level="ERROR",
        attachment={
            "name": "filepath",
            "data": "data",
            "mime": "mime_type",
        },
        item_id="item_id",
    )


@mock.patch.object(PrettyTable, "__init__")
@mock.patch.object(PrettyTable, "add_row")
@mock.patch.object(PrettyTable, "get_string")
def test_build_table_content(mock_get_string, mock_add_row, mock_init):
    mock_init.return_value = None
    mock_table, mock_rows = mock.Mock(), mock.Mock()
    mock_table.headings = ["A", "B"]
    mock_rows.cells = ["c", "d"]
    mock_table.rows = [mock_rows]
    BehaveAgent._build_table_content(mock_table)
    mock_init.assert_called_once_with(field_names=["A", "B"])
    mock_add_row.assert_called_once_with(["c", "d"])
    mock_get_string.assert_called_once()


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_scenario_exception_default_message(mock_timestamp, config):
    mock_timestamp.return_value = 123
    mock_scenario = mock.Mock()
    mock_scenario.exception = None
    mock_scenario.error_message = None
    mock_scenario.name = "scenario_name"
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
    ba._log_scenario_exception(mock_scenario)
    mock_rps.log.assert_called_once_with(
        item_id="scenario_id",
        time=123,
        level="ERROR",
        message="Scenario 'scenario_name' finished with error.",
    )


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_scenario_exception(mock_timestamp, config):
    mock_timestamp.return_value = 123
    mock_scenario = mock.Mock()
    mock_scenario.exception.args = ["Exception arg1", "Exception arg2"]
    mock_scenario.error_message = "Error message"
    mock_scenario.name = "scenario_name"
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
    ba._log_scenario_exception(mock_scenario)
    mock_rps.log.assert_called_once_with(
        item_id="scenario_id",
        time=123,
        level="ERROR",
        message="Scenario 'scenario_name' finished with error.\n"
        "Exception arg1, Exception arg2\nError message",
    )


@pytest.mark.parametrize("tags", [None, ["A", "B"]])
def test_log_fixtures_without_fixture_tags(tags, config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_item = mock.Mock()
    mock_item.tags = tags
    BehaveAgent(config, mock_rps)._log_fixtures(mock_item, "type", "item_id")
    mock_rps.log.assert_not_called()
    mock_rps.start_test_item.assert_not_called()


@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_fixtures(mock_timestamp):
    mock_timestamp.return_value = 123
    cfg = Config(
        endpoint="endpoint",
        token="token",
        project="project",
        step_based="False",
    )
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_item = mock.Mock()
    mock_item.tags = ["fixture.A", "fixture.B"]
    BehaveAgent(cfg, mock_rps)._log_fixtures(mock_item, "type", "item_id")
    mock_rps.log.assert_has_calls(
        [
            mock.call(
                123,
                "Using of '{}' fixture".format(t),
                level="INFO",
                item_id="item_id",
            )
            for t in ("A", "B")
        ],
        any_order=True,
    )
    cfg.step_based = True
    BehaveAgent(cfg, mock_rps)._log_fixtures(mock_item, "type", "item_id")
    mock_rps.start_test_item.assert_has_calls(
        [
            mock.call(
                start_time=123,
                name="Using of '{}' fixture".format(t),
                item_type="type",
                parent_item_id="item_id",
            )
            for t in ("A", "B")
        ],
        any_order=True,
    )
    assert mock_rps.finish_test_item.call_count == 2


def test_log_cleanup_no_layer(config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context, mock_func = mock.Mock(), mock.Mock()
    mock_func.__name__ = "cleanup_func"
    mock_context._stack = [{"@layer": "scenario", "@cleanups": [mock_func]}]
    BehaveAgent(config, mock_rps)._log_cleanups(mock_context, "feature")
    mock_rps.start_test_item.assert_not_called()
    mock_context._stack = [{"@layer": "feature"}]
    BehaveAgent(config, mock_rps)._log_cleanups(mock_context, "scenario")
    mock_rps.start_test_item.assert_not_called()


def test_log_cleanup_no_cleanups(config):
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context = mock.Mock()
    mock_context._stack = [{"@layer": "feature"}]
    BehaveAgent(config, mock_rps)._log_cleanups(mock_context, "feature")
    mock_rps.start_test_item.assert_not_called()


@pytest.mark.parametrize(
    "scope,item_type,item_id",
    [
        ("feature", "AFTER_SUITE", "feature_id"),
        ("scenario", "AFTER_TEST", "scenario_id"),
    ],
)
@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_cleanup_step_based(mock_timestamp, scope, item_type, item_id):
    cfg = Config(endpoint="E", token="T", project="P", step_based=True)
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock(), mock.Mock()
    mock_func1.__name__ = "cleanup_func1"
    mock_func2.__name__ = "cleanup_func2"
    mock_context._stack = [
        {"@layer": scope, "@cleanups": [mock_func1, mock_func2]}
    ]
    ba = BehaveAgent(cfg, mock_rps)
    ba._feature_id = "feature_id"
    ba._scenario_id = "scenario_id"
    ba._log_cleanups(mock_context, scope)
    calls = [
        mock.call(
            name="Execution of '{}' cleanup function".format(f_name),
            start_time=123,
            item_type=item_type,
            parent_item_id=item_id,
        )
        for f_name in ("cleanup_func1", "cleanup_func2")
    ]
    mock_rps.start_test_item.assert_has_calls(calls)
    assert mock_rps.finish_test_item.call_count == 2


@pytest.mark.parametrize(
    "scope,item_id", [("feature", "feature_id"), ("scenario", "scenario_id")]
)
@mock.patch("behave_reportportal.behave_agent.timestamp")
def test_log_cleanup_scenario_based(mock_timestamp, config, scope, item_id):
    mock_timestamp.return_value = 123
    mock_rps = mock.create_autospec(ReportPortalService)
    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock(), mock.Mock()
    mock_func1.__name__ = "cleanup_func1"
    mock_func2.__name__ = "cleanup_func2"
    mock_context._stack = [
        {"@layer": scope, "@cleanups": [mock_func1, mock_func2]}
    ]
    ba = BehaveAgent(config, mock_rps)
    ba._feature_id = "feature_id"
    ba._scenario_id = "scenario_id"
    ba._log_cleanups(mock_context, scope)
    calls = [
        mock.call(
            123,
            "Execution of '{}' cleanup function".format(f_name),
            level="INFO",
            item_id=item_id,
        )
        for f_name in ("cleanup_func1", "cleanup_func2")
    ]
...


behave_agent.py

Source: behave_agent.py (GitHub)


...
    def start_feature(self, context, feature, **kwargs):
        """Start feature in Report Portal."""
        if feature.tags and "skip" in feature.tags:
            feature.skip("Marked with @skip")
        self._feature_id = self._rp.start_test_item(
            name=feature.name,
            start_time=timestamp(),
            item_type="SUITE",
            description=self._item_description(feature),
            code_ref=self._code_ref(feature),
            attributes=self._attributes(feature),
            **kwargs
        )
        self._log_fixtures(feature, "BEFORE_SUITE", self._feature_id)
        self._log_item_id = self._feature_id

    @check_rp_enabled
    def finish_feature(self, context, feature, status=None, **kwargs):
        """Finish feature in Report Portal."""
        if feature.tags and "skip" in feature.tags:
            status = "SKIPPED"
        self._log_cleanups(context, "feature")
        self._rp.finish_test_item(
            item_id=self._feature_id,
            end_time=timestamp(),
            status=status or self.convert_to_rp_status(feature.status.name),
            **kwargs
        )

    @check_rp_enabled
    def start_scenario(self, context, scenario, **kwargs):
        """Start scenario in Report Portal."""
        if scenario.tags and "skip" in scenario.tags:
            scenario.skip("Marked with @skip")
        self._scenario_id = self._rp.start_test_item(
            name=scenario.name,
            start_time=timestamp(),
            item_type="STEP",
            parent_item_id=self._feature_id,
            code_ref=self._code_ref(scenario),
            attributes=self._attributes(scenario),
            parameters=self._get_parameters(scenario),
            description=self._item_description(scenario),
            test_case_id=self._test_case_id(scenario),
            **kwargs
        )
        self._log_fixtures(scenario, "BEFORE_TEST", self._scenario_id)
        self._log_item_id = self._scenario_id

    @check_rp_enabled
    def finish_scenario(self, context, scenario, status=None, **kwargs):
        """Finish scenario in Report Portal."""
        if scenario.tags and "skip" in scenario.tags:
            status = "SKIPPED"
        if scenario.status.name == "failed":
            self._log_scenario_exception(scenario)
        self._log_cleanups(context, "scenario")
        self._rp.finish_test_item(
            item_id=self._scenario_id,
            end_time=timestamp(),
            status=status or self.convert_to_rp_status(scenario.status.name),
            **kwargs
        )
        self._log_item_id = self._feature_id

    @check_rp_enabled
    def start_step(self, context, step, **kwargs):
        """Start step in Report Portal (step-based mode only)."""
        if self._cfg.step_based:
            description = step.text or ""
            self._step_id = self._rp.start_test_item(
                name="[{keyword}]: {name}".format(
                    keyword=step.keyword, name=step.name
                ),
                start_time=timestamp(),
                item_type="STEP",
                parent_item_id=self._scenario_id,
                code_ref=self._code_ref(step),
                description=description
                + self._build_table_content(step.table),
                **kwargs
            )
            self._log_item_id = self._step_id

    @check_rp_enabled
    def finish_step(self, context, step, **kwargs):
        """Finish step in Report Portal."""
        if self._cfg.step_based:
            self._finish_step_step_based(step, **kwargs)
            return
        self._finish_step_scenario_based(step, **kwargs)

    @check_rp_enabled
    def post_log(
        self, message, level="INFO", item_id=None, file_to_attach=None
    ):
        """Post log message to current test item."""
        self._log(
            message,
            level,
            file_to_attach=file_to_attach,
            item_id=item_id or self._log_item_id,
        )

    @check_rp_enabled
    def post_launch_log(self, message, level="INFO", file_to_attach=None):
        """Post log message to launch."""
        self._log(message, level, file_to_attach=file_to_attach)

    def _log(self, message, level, file_to_attach=None, item_id=None):
        attachment = None
        if file_to_attach:
            with open(file_to_attach, "rb") as f:
                attachment = {
                    "name": os.path.basename(file_to_attach),
                    "data": f.read(),
                    "mime": mimetypes.guess_type(file_to_attach)[0]
                    or "application/octet-stream",
                }
        self._rp.log(
            time=timestamp(),
            message=message,
            level=level,
            attachment=attachment,
            item_id=item_id,
        )

    def _get_launch_attributes(self):
        """Return launch attributes in the format supported by the RP."""
        attributes = self._cfg.launch_attributes or []
        system_attributes = get_launch_sys_attrs()
        system_attributes["agent"] = "{}-{}".format(
            self.agent_name, self.agent_version
        )
        return attributes + _dict_to_payload(system_attributes)

    @staticmethod
    def _build_table_content(table):
        if not table:
            return ""
        pt = PrettyTable(field_names=table.headings)
        for row in table.rows:
            pt.add_row(row.cells)
        return "\n" + pt.get_string()

    def _finish_step_step_based(self, step, status=None, **kwargs):
        if step.status.name == "failed":
            self._log_step_exception(step, self._step_id)
        self._rp.finish_test_item(
            item_id=self._step_id,
            end_time=timestamp(),
            status=status or self.convert_to_rp_status(step.status.name),
            **kwargs
        )
        self._log_item_id = self._scenario_id

    def _finish_step_scenario_based(self, step, **kwargs):
        self._rp.log(
            item_id=self._scenario_id,
            time=timestamp(),
            message="[{keyword}]: {name}. {text}{table}".format(
                keyword=step.keyword,
                name=step.name,
                text=step.text or "",
                table=self._build_table_content(step.table),
            ),
            level="INFO",
            **kwargs
        )
        if step.status.name == "failed":
            self._log_step_exception(step, self._scenario_id)

    def _log_step_exception(self, step, item_id):
        message = [
            "Step [{keyword}]: {name} was finished with exception.".format(
                keyword=step.keyword, name=step.name
            )
        ]
        if step.exception:
            message.append(", ".join(step.exception.args))
        if step.error_message:
            message.append(step.error_message)
        self._rp.log(
            item_id=item_id,
            time=timestamp(),
            level="ERROR",
            message="\n".join(message),
        )

    def _log_scenario_exception(self, scenario):
        message = ["Scenario '{}' finished with error.".format(scenario.name)]
        if scenario.exception:
            message.append(", ".join(scenario.exception.args))
        if scenario.error_message:
            message.append(scenario.error_message)
        self._rp.log(
            item_id=self._scenario_id,
            time=timestamp(),
            level="ERROR",
            message="\n".join(message),
        )

    def _log_fixtures(self, item, item_type, parent_item_id):
        """
        Log used fixtures for an item.

        Writes log records in the scenario-based approach
        and creates step items in the step-based one.
        """
        if not item.tags:
            return
        for tag in item.tags:
            if not tag.startswith("fixture."):
                continue
            msg = "Using of '{}' fixture".format(tag[len("fixture."):])
            if self._cfg.step_based:
                self._step_id = self._rp.start_test_item(
                    name=msg,
                    start_time=timestamp(),
                    item_type=item_type,
                    parent_item_id=parent_item_id,
                )
                self._rp.finish_test_item(self._step_id, timestamp(), "PASSED")
                continue
            self._rp.log(
                timestamp(),
                msg,
                level="INFO",
                item_id=parent_item_id,
            )

    def _log_cleanups(self, context, scope):
        layer = next(
            iter(
                [
                    level
                    for level in context._stack
                    if level.get("@layer") == scope
                ]
            ),
            None,
        )
        if not layer:
            return
        item_type = "AFTER_SUITE" if scope == "feature" else "AFTER_TEST"
        item_id = self._feature_id if scope == "feature" else self._scenario_id
        for cleanup in layer.get("@cleanups", []):
            msg = "Execution of '{}' cleanup function".format(cleanup.__name__)
            if self._cfg.step_based:
                self._step_id = self._rp.start_test_item(
                    name=msg,
                    start_time=timestamp(),
                    item_type=item_type,
                    parent_item_id=item_id,
                )
                self._rp.finish_test_item(self._step_id, timestamp(), "PASSED")
                continue
            self._rp.log(
                timestamp(),
                msg,
                level="INFO",
                item_id=item_id,
            )

    @staticmethod
...


report_portal.py

Source: report_portal.py (GitHub)


...
                             start_time=self.timestamp(),
                             description='Test name - {}'.format(self.args['simulation']))
        errors_len = len(errors)
        if errors_len > 0:
            service.start_test_item(name="Functional errors",
                                    start_time=self.timestamp(),
                                    description="This simulation has failed requests",
                                    item_type="SUITE")
            for key in errors:
                # Start a test item for each failed request.
                item_name = self.get_item_name(errors[key])
                service.start_test_item(name=item_name,
                                        description="This request was failed {} times".format(
                                            errors[key]['Error count']),
                                        tags=[errors[key]['Request URL']],
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        parameters={"simulation": self.args['simulation'],
                                                    'test type': self.args['type']})
                self.log_message(service, 'Request name', errors[key], 'WARN')
                self.log_message(service, 'Method', errors[key], 'WARN')
                self.log_message(service, 'Request URL', errors[key], 'WARN')
                self.log_message(service, 'Request_params', errors[key], 'WARN')
                self.log_message(service, 'Request headers', errors[key], 'INFO')
                self.log_message(service, 'Error count', errors[key], 'WARN')
                self.log_message(service, 'Error_message', errors[key], 'WARN')
                self.log_message(service, 'Response code', errors[key], 'WARN')
                self.log_message(service, 'Response', errors[key], 'WARN')
                self.log_unique_error_id(service, errors[key]['Request name'], errors[key]['Method'],
                                         errors[key]['Response code'])
                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            service.finish_test_item(end_time=self.timestamp(), status="FAILED")
        else:
            service.start_test_item(name="Functional errors",
                                    start_time=self.timestamp(),
                                    item_type="STEP",
                                    description='This simulation has no functional errors')
            service.finish_test_item(end_time=self.timestamp(), status="PASSED")
        if performance_degradation_rate > self.performance_degradation_rate:
            service.start_test_item(name="Compare to baseline",
                                    start_time=self.timestamp(),
                                    description="Test \"{}\" failed with performance degradation rate {}"
                                    .format(self.args['simulation'], performance_degradation_rate),
                                    item_type="SUITE")
            service.log(time=self.timestamp(),
                        message="The following requests are slower than baseline:",
                        level="INFO")
            for request in compare_with_baseline:
                service.start_test_item(name="\"{}\" reached {} ms by {}. Baseline {} ms."
                                        .format(request['request_name'], request['response_time'],
                                                self.args['comparison_metric'], request['baseline']),
                                        tags=['performance degradation'],
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        parameters={'simulation': self.args['simulation'],
                                                    'test type': self.args['type']})
                service.log(time=self.timestamp(),
                            message="\"{}\" reached {} ms by {}. Baseline {} ms."
                            .format(request['request_name'], request['response_time'],
                                    self.args['comparison_metric'], request['baseline']),
                            level="WARN")
                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            service.log(time=self.timestamp(),
                        message=hashlib.sha256(
                            "{} performance degradation".format(
                                self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                        level='ERROR')
            service.finish_test_item(end_time=self.timestamp(), status="FAILED")
        else:
            service.start_test_item(name="Compare to baseline",
                                    start_time=self.timestamp(),
                                    item_type="STEP",
                                    description='Performance degradation rate less than {}'
                                    .format(self.performance_degradation_rate))
            service.finish_test_item(end_time=self.timestamp(), status="PASSED")
        if missed_threshold_rate > self.missed_thresholds_rate:
            service.start_test_item(name="Compare with thresholds",
                                    start_time=self.timestamp(),
                                    description="Test \"{}\" failed with missed thresholds rate {}"
                                    .format(self.args['simulation'], missed_threshold_rate),
                                    item_type="SUITE")
            for color in ["yellow", "red"]:
                colored = False
                for th in compare_with_thresholds:
                    if th['threshold'] == color:
                        service.start_test_item(name="{} threshold for \"{}\""
                                                .format(color, th['request_name']),
                                                tags=['missed thresholds'],
                                                start_time=self.timestamp(),
                                                item_type="STEP",
                                                parameters={'simulation': self.args['simulation'],
                                                            'test type': self.args['type']})
                        if not colored:
                            # Log the per-color header only once.
                            service.log(time=self.timestamp(),
                                        message=f"The following {color} thresholds were exceeded:",
                                        level="INFO")
                            colored = True
                        appendage = calculate_appendage(th['target'])
                        service.log(time=self.timestamp(),
                                    message=f"\"{th['request_name']}\" {th['target']}{appendage} "
                                            f"with value {th['metric']}{appendage} "
                                            f"exceeded threshold of {th[color]}{appendage}",
                                    level="WARN")
                        service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            service.log(time=self.timestamp(),
                        message=hashlib.sha256(
                            "{} missed thresholds".format(
                                self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                        level='ERROR')
            service.finish_test_item(end_time=self.timestamp(), status="FAILED")
        else:
            service.start_test_item(name="Compare with thresholds",
                                    start_time=self.timestamp(),
                                    item_type="STEP",
                                    description='Missed thresholds rate less than {}'
                                    .format(self.missed_thresholds_rate))
            service.finish_test_item(end_time=self.timestamp(), status="PASSED")
        # Finish launch.
        service.finish_launch(end_time=self.timestamp())
...


reportportal.py

Source: reportportal.py (GitHub)


...
        )
        traceback.print_exception(*self._rp_exc_info, file=sys.stderr)

    def _end_current_test_item(self, end_time, status):
        self.service.finish_test_item(end_time=make_time(end_time), status=status)

    def _start_test_item(self, item_type, start_time, name, description, wrapped=False):
        if wrapped:
            self.service.start_test_item(
                item_type="SUITE", start_time=make_time(start_time),
                name=name, description=description
            )
        self.service.start_test_item(
            item_type=item_type, start_time=make_time(start_time),
            name=name, description=description
        )

    def _end_test_item(self, end_time, is_successful, wrapped=False):
        status = "passed" if is_successful else "failed"
        if wrapped:
            self._end_current_test_item(end_time, status=status)
        self._end_current_test_item(end_time, status=status)

    def on_test_session_start(self, event):
        if self._has_rp_error():
            return
        self.service.start_launch(
            name=self.launch_name, description=self.launch_description, start_time=make_time(event.time)
        )

    def on_test_session_end(self, event):
        if self._has_rp_error():
            self._show_rp_error()
        else:
            self.service.finish_launch(end_time=make_time(event.time))
            self.service.terminate()
            if self._has_rp_error():
                self._show_rp_error()

    def on_test_session_setup_start(self, event):
        if self._has_rp_error():
            return
        self._start_test_item(
            item_type="BEFORE_CLASS", start_time=event.time,
            name="session_setup", description="Test Session Setup",
            wrapped=True
        )

    def on_test_session_setup_end(self, event):
        if self._has_rp_error():
            return
        self._end_test_item(
            event.time,
            not self.report.test_session_setup or self.report.test_session_setup.is_successful(),
            wrapped=True
        )

    def on_test_session_teardown_start(self, event):
        if self._has_rp_error():
            return
        self._start_test_item(
            item_type="AFTER_CLASS", start_time=event.time,
            name="session_teardown", description="Test Session Teardown",
            wrapped=True
        )

    def on_test_session_teardown_end(self, event):
        if self._has_rp_error():
            return
        self._end_test_item(
            event.time,
            not self.report.test_session_teardown or self.report.test_session_teardown.is_successful(),
            wrapped=True
        )

    def on_suite_start(self, event):
        if self._has_rp_error():
            return
        suite = event.suite
        self.service.start_test_item(
            item_type="SUITE", start_time=make_time(event.time),
            name=suite.name, description=suite.description,
            tags=make_tags_from_test_tree_node(suite)
        )

    def on_suite_end(self, event):
        if self._has_rp_error():
            return
        self._end_current_test_item(event.time, status="passed")

    def on_suite_setup_start(self, event):
        if self._has_rp_error():
            return
        self._start_test_item(
            item_type="BEFORE_CLASS", start_time=event.time,
            name="suite_setup", description="Suite Setup",
            wrapped=len(event.suite.get_suites()) > 0
        )

    def on_suite_setup_end(self, event):
        if self._has_rp_error():
            return
        suite_data = self.report.get_suite(event.suite)
        self._end_test_item(
            event.time,
            not suite_data.suite_setup or suite_data.suite_setup.is_successful(),
            wrapped=len(event.suite.get_suites()) > 0
        )

    def on_suite_teardown_start(self, event):
        if self._has_rp_error():
            return
        self._start_test_item(
            item_type="AFTER_CLASS", start_time=event.time,
            name="suite_teardown", description="Suite Teardown",
            wrapped=len(event.suite.get_suites()) > 0
        )

    def on_suite_teardown_end(self, event):
        if self._has_rp_error():
            return
        suite_data = self.report.get_suite(event.suite)
        self._end_test_item(
            event.time,
            not suite_data.suite_teardown or suite_data.suite_teardown.is_successful(),
            wrapped=len(event.suite.get_suites()) > 0
        )

    def on_test_start(self, event):
        if self._has_rp_error():
            return
        test = event.test
        self.service.start_test_item(
            item_type="TEST", start_time=make_time(event.time),
            name=test.name, description=test.description,
            tags=make_tags_from_test_tree_node(test)
        )

    def on_test_end(self, event):
        if self._has_rp_error():
            return
        test_data = self.report.get_test(event.test)
        self._end_current_test_item(event.time, test_data.status)

    def _bypass_test(self, test, status, time):
        if self._has_rp_error():
            return
        self.service.start_test_item(
            item_type="TEST", start_time=make_time(time),
            name=test.name, description=test.description, tags=test.tags,
        )
        self._end_current_test_item(time, status=status)

    def on_test_skipped(self, event):
        if self._has_rp_error():
            return
        self._bypass_test(event.test, "skipped", event.time)

    def on_disabled_test(self, event):
        # Do not log disabled tests; there seems to be no corresponding status in ReportPortal.
        pass

    def on_step_start(self, event):
        if self._has_rp_error():
            return
...



