How to use the test_stream_logs method in LocalStack

Best Python code snippets using localstack_python.
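
Before looking at the snippets below, here is a minimal sketch of what a test_stream_logs-style test against LocalStack itself could look like. It is not taken from any of the projects below; it assumes LocalStack is running locally on its default edge port 4566, and it uses boto3's CloudWatch Logs client with dummy credentials. The group and stream names are made up for the example.

import time

import boto3


def test_stream_logs():
    # Hypothetical example: talk to a locally running LocalStack instance
    # (default edge endpoint http://localhost:4566) with dummy credentials.
    logs = boto3.client(
        "logs",
        endpoint_url="http://localhost:4566",
        region_name="us-east-1",
        aws_access_key_id="test",
        aws_secret_access_key="test",
    )
    group, stream = "demo-group", "demo-stream"
    logs.create_log_group(logGroupName=group)
    logs.create_log_stream(logGroupName=group, logStreamName=stream)
    # Write a single event, then read the stream back and assert it arrived.
    logs.put_log_events(
        logGroupName=group,
        logStreamName=stream,
        logEvents=[{"timestamp": int(time.time() * 1000), "message": "hello from localstack"}],
    )
    events = logs.get_log_events(logGroupName=group, logStreamName=stream)["events"]
    assert any(e["message"] == "hello from localstack" for e in events)

The snippets that follow show how real projects structure the same idea: poll a log source, stream new content as it appears, and assert on what was printed.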

test_job_ops_helper.py

Source: test_job_ops_helper.py (GitHub)

import re
import time
from collections import OrderedDict
from io import StringIO
from typing import Dict
from unittest.mock import Mock

import pytest
import vcr
from mock import mock_open, patch

from azure.ai.ml._restclient.runhistory.models import RunDetails, RunDetailsWarning
from azure.ai.ml._scope_dependent_operations import OperationScope
from azure.ai.ml.operations._job_ops_helper import (
    _get_sorted_filtered_logs,
    _incremental_print,
    list_logs,
    stream_logs_until_completion,
)
from azure.ai.ml.operations._run_operations import RunOperations

from .test_vcr_utils import before_record_cb


class DummyJob:
    class InteractionEndpoint:
        def __init__(self, **kwargs):
            self.endpoint = "testurl"

    class Properties:
        def __init__(self, **kwargs):
            super().__init__()
            self.experiment_name = "dummy_exp"
            self.services = {"Studio": DummyJob.InteractionEndpoint()}
            self.job_type = "Command"

    def __init__(self, **kwargs):
        super().__init__()
        self.name = "dummy"
        self.properties = DummyJob.Properties()


def fake_read():
    return mock_open(read_data="{}")


@pytest.fixture
def mock__commands():
    m = Mock(name="_commands")
    mock_run_history_facade = patch.dict("sys.modules", {"azureml._execution": m})
    mock_run_history_facade.start()
    yield m
    mock_run_history_facade.stop()


@pytest.fixture
def mock_time(request):
    p = patch("azure.ai.ml.operations._job_ops_helper.time")
    yield p.start()
    p.stop()


@pytest.fixture
def mock_run_operations(mock_workspace_scope: OperationScope, mock_aml_services_run_history: Mock) -> RunOperations:
    yield RunOperations(mock_workspace_scope, mock_aml_services_run_history)


@pytest.mark.skip("TODO 1907352: Relies on a missing VCR.py recording + test suite needs to be reworked")
@pytest.mark.unittest
class TestJobLogManager:
    def test_wait_for_completion_with_output(self, mock_run_operations):
        dummy_job = DummyJob()
        with patch.object(
            RunOperations,
            "get_run_details",
            side_effect=[
                RunDetails(status="Finalizing", log_files={"log1": "Log", "log2": "log"}),
                RunDetails(status="Completed", log_files={"log1": "Log", "log2": "log"}),
            ],
        ) as get_run_mock:
            stream_logs_until_completion(mock_run_operations, dummy_job)
            get_run_mock.assert_called()

    def test_wait_for_completion_with_error_silent(self, mock_run_operations):
        dummy_job = DummyJob()
        with patch.object(
            RunOperations,
            "get_run_details",
            return_value=RunDetails(status="Failed", warnings=[RunDetailsWarning(message="bad luck")]),
        ) as get_run_mock:
            stream_logs_until_completion(mock_run_operations, dummy_job, None, False)
            get_run_mock.assert_called_once()

    def test_wait_for_completion_with_error_raise(self, mock_run_operations):
        dummy_job = DummyJob()
        with patch.object(RunOperations, "get_run_details", return_value=RunDetails(status="Failed")) as get_run_mock:
            with pytest.raises(Exception):
                stream_logs_until_completion(mock_run_operations, dummy_job)
            get_run_mock.assert_called_once()

    # The list of logs that should be streamed; if you need to recreate it,
    # you can just copy and paste the logFiles section from the Raw run JSON on the UI,
    # then keep here only the ones we stream
    _streamable_log_files_urls = OrderedDict(
        {
            "azureml-logs/55_azureml-execution-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/55_azureml-execution-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt?sv=2019-02-02&sr=b&sig=2B9oQEbsUwKZzw1eTUiyLJy64DRC%2BVOjv9lRb8Jx%2FLM%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
            "azureml-logs/65_job_prep-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/65_job_prep-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt?sv=2019-02-02&sr=b&sig=2E1x1mUWF5Y8VD1e0yMqEZeWct4vngjES%2FJ3SFzKKxU%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
            "azureml-logs/70_driver_log-worker-0.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/70_driver_log-worker-0.txt?sv=2019-02-02&sr=b&sig=8lXLfLMqGaQ7VNGLCKkQ%2BbdebJcyEFCJzNStYCRuVZc%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
            "azureml-logs/75_job_post-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/75_job_post-tvmps_f712ea79c8fca9c3c7f41774b414e867a0854377c8e411b095f30dd68f6d6027_d.txt?sv=2019-02-02&sr=b&sig=9YR6A64Tuq0E7KsgzPX7atqJ33eGjaJ8QeRaNaQ1%2BL4%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        }
    )

    # The list of logs that should NOT be streamed; if you need to recreate it,
    # you can just copy and paste the logFiles section from the Raw run JSON on the UI,
    # then keep here only the ones we shouldn't stream
    _additional_log_files_urls = {
        "azureml-logs/process_info.json": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "azureml-logs/process_status.json": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_status.json?sv=2019-02-02&sr=b&sig=FDFzfqtn9iYq2FMb5SOBGBu91k%2B8LQITcRiYYyLtDHs%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "logs/azureml/job_prep_azureml.log": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/logs/azureml/job_prep_azureml.log?sv=2019-02-02&sr=b&sig=td0HUXBar%2FYv%2FhZiSdlPR516OH8bCMiBN3yH6dCSHvk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "logs/azureml/job_release_azureml.log": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/logs/azureml/job_release_azureml.log?sv=2019-02-02&sr=b&sig=BeeRya%2FFZhqCNBk0hCJrks7%2Bejg9qTCFe5FNnf%2BUJyk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "logs/azureml/worker0_373_azureml.log": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/logs/azureml/worker0_373_azureml.log?sv=2019-02-02&sr=b&sig=ySxUJjd1lqi%2FskcMfAYYFQ%2FyUQALbV0WH7jYtf%2FXaKk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
    }

    _common_runtime_log_urls = {
        "user_logs/std_log.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "azureml-logs/lifecycler/lifecycler.log": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
    }

    _common_runtime_mpi_urls = {
        "user_logs/std_log_process_00.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_process_01.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_process_02.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_process_03.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_process_04.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
    }

    _common_runtime_tensorflow_urls = {
        "user_logs/std_log_node_00_ps.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_node_01_ps.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_node_00_worker.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_node_01_worker.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
        "user_logs/std_log_node_02_worker.txt": "https://rihorn24316837458.blob.core.windows.net/azureml/ExperimentRun/dcid.1a9952e7-f173-45c0-bd61-3cd591498bdf/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wn2pW00%2F86Qlo3NWOokMVGmaeModJNyHlIP5dDI4zqk%3D&skoid=e3f42e2c-d581-4b65-a966-631cfa961328&sktid=72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2021-05-03T13%3A11%3A21Z&ske=2021-05-04T08%3A32%3A18Z&sks=b&skv=2019-02-02&st=2021-05-03T13%3A45%3A30Z&se=2021-05-03T21%3A55%3A30Z&sp=r",
    }

    # Method to create a RunDetails based on the provided status,
    # the number of streamable files to include, and whether or not to add
    # non-streamable log files
    def _get_run_details_dto(
        self, status="Finalizing", number_of_streamable_log_files=0, include_additional_log_files=False
    ) -> RunDetails:
        keys = self._streamable_log_files_urls.keys()
        # Check if there are enough streamable log files
        if number_of_streamable_log_files > len(keys):
            raise Exception(f"There are less than {number_of_streamable_log_files} streamable log files")
        # Keep only the first number_of_streamable_log_files logs
        log_files = {}
        for key in keys:
            log_files[key] = self._streamable_log_files_urls[key]
            number_of_streamable_log_files -= 1
            if not number_of_streamable_log_files:
                break
        # Add the additional logs if specified
        if include_additional_log_files:
            log_files.update(self._additional_log_files_urls)
        return RunDetails(status=status, log_files=log_files)

    # Helper method to test that logs will be streamed comprehensively
    # and in predictable order, independently of the sequence of run details.
    # Logs could be delivered at a different pace across subsequent calls to get details
    def _test_stream_logs_helper(self, mock_run_operations, run_details_sequence=[]) -> None:
        my_vcr = vcr.VCR(before_record=before_record_cb)
        with patch("sys.stdout", new=StringIO()) as fake_out, patch.object(
            RunOperations, "get_run_details", side_effect=run_details_sequence
        ) as get_run_mock, patch.object(time, "sleep") as fake_time, my_vcr.use_cassette(
            "cassettes/test_stream_logs.yaml"
        ):
            stream_logs_until_completion(mock_run_operations, DummyJob())
            # get_run_mock was called, and the whole sequence of run details was consumed
            get_run_mock.assert_called()
            assert get_run_mock.call_count == len(run_details_sequence)
            # while streaming, we waited in between each call to get run details
            fake_time.assert_called()
            assert fake_time.call_count == len(run_details_sequence) - 1
            # Regex for checking the 'Streaming <log name>' message
            reg_exp = re.compile(r"Streaming ([\S]*)")
            output = fake_out.getvalue()
            list_of_logs = list(self._streamable_log_files_urls.keys())
            # Check that all the logs were streamed
            assert reg_exp.findall(output) == list_of_logs
            # Check there were no duplicates
            assert len(list_of_logs) == len(set(list_of_logs))

    def test_list_logs(self, mock_run_operations) -> None:
        with patch.object(
            RunOperations,
            "get_run_details",
            side_effect=[self._get_run_details_dto(status="Completed", number_of_streamable_log_files=3)],
        ) as get_run_mock:
            output = list_logs(mock_run_operations, DummyJob())
            get_run_mock.assert_called()
            assert len(output.items()) == 3

    # Method to test the golden path: a new log was added on each call to get run details
    @pytest.mark.vcr()
    def test_stream_logs_golden_path(self, mock_run_operations) -> None:
        run_details_sequence = [
            self._get_run_details_dto(status="Running"),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=1),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=2),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=3),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=4),
            self._get_run_details_dto(
                status="Completed", number_of_streamable_log_files=4, include_additional_log_files=True
            ),
        ]
        self._test_stream_logs_helper(mock_run_operations, run_details_sequence=run_details_sequence)

    # Method to test when all the logs were available at the same time
    @pytest.mark.vcr()
    def test_stream_logs_arriving_all_together(self, mock_run_operations) -> None:
        run_details_sequence = [
            self._get_run_details_dto(status="Running"),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=4),
            self._get_run_details_dto(
                status="Completed", number_of_streamable_log_files=4, include_additional_log_files=True
            ),
        ]
        self._test_stream_logs_helper(mock_run_operations, run_details_sequence=run_details_sequence)

    # Method to test when the logs became available in batches of 2
    @pytest.mark.vcr()
    def test_stream_logs_arriving_in_batches(self, mock_run_operations) -> None:
        run_details_sequence = [
            self._get_run_details_dto(status="Running"),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=2),
            self._get_run_details_dto(status="Finalizing", number_of_streamable_log_files=4),
            self._get_run_details_dto(
                status="Completed", number_of_streamable_log_files=4, include_additional_log_files=True
            ),
        ]
        self._test_stream_logs_helper(mock_run_operations, run_details_sequence=run_details_sequence)

    def test_get_streamable_logs_common_runtime_folder_structure(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_log_urls, "Command")
        assert len(output) == 1
        assert output[0] == "user_logs/std_log.txt"

    def test_get_all_logs_common_runtime_folder_structure(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_log_urls, "Command", {}, False)
        assert len(output) == 1
        assert output[0] == "user_logs/std_log.txt"

    def test_get_streamable_logs_common_runtime_mpi(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_mpi_urls, "Command")
        assert len(output) == 1
        assert output[0] == "user_logs/std_log_process_00.txt"

    def test_get_all_logs_common_runtime_mpi(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_mpi_urls, "Command", {}, False)
        assert len(output) == 5

    def test_get_streamable_logs_common_runtime_tensorflow(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_tensorflow_urls, "Command")
        assert len(output) == 1
        assert output[0] == "user_logs/std_log_node_00_ps.txt"

    def test_get_all_logs_common_runtime_tensorflow(self) -> None:
        output = _get_sorted_filtered_logs(self._common_runtime_tensorflow_urls, "Command", {}, False)
        assert len(output) == 5

    def test_stream_printing(self) -> None:
        log_name = "55_log_test"
        log_content = "line1\nline2\nline3\n"
        stream = StringIO()
        processed_logs: Dict[str, int] = {}
        _incremental_print(log_content, processed_logs, log_name, stream)
        # should contain the length of the log (3) + the header lines (4)
        assert len(stream.getvalue().splitlines()) == 7
        assert processed_logs[log_name] == 3
        # reset the state, to mock out the case where the first two lines have already been read in
        processed_logs[log_name] = 2
        stream = StringIO()
        _incremental_print(log_content, processed_logs, log_name, stream)
        # should contain the length of the log (3) - skip previous lines (2) + no header lines (0)
        assert len(stream.getvalue().splitlines()) == 1
        assert processed_logs[log_name] == 3

    def test_empty_log_is_skipped(self) -> None:
        log_name = "55_log_test"
        log_content = ""
        stream = StringIO()
        processed_logs: Dict[str, int] = {}
        _incremental_print(log_content, processed_logs, log_name, stream)
        # should be empty, no header, no log.
        assert len(stream.getvalue().splitlines()) == 0
...
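
The last two tests above pin down the incremental-print behaviour these streaming helpers rely on: a header is written the first time a log file is seen, only the lines added since the previous poll are printed afterwards, and empty logs are skipped entirely. A minimal sketch of that pattern follows. It is not the Azure ML implementation; the exact header text and the 40-character separator are assumptions chosen to match the line counts asserted in test_stream_printing.

from io import StringIO
from typing import Dict, TextIO


def incremental_print(log_content: str, processed_lines: Dict[str, int], name: str, out: TextIO) -> None:
    # Empty logs are skipped entirely, as in test_empty_log_is_skipped.
    if not log_content:
        return
    lines = log_content.splitlines()
    seen = processed_lines.get(name, 0)
    if seen == 0:
        # First time this log is seen: emit a 4-line header before the content
        # (3 log lines + 4 header lines = 7, matching test_stream_printing).
        out.write("\n" + "=" * 40 + "\n" + f"Streaming {name}\n" + "=" * 40 + "\n")
    # Only print the lines that are new since the previous poll.
    for line in lines[seen:]:
        out.write(line + "\n")
    processed_lines[name] = len(lines)


# Quick check mirroring test_stream_printing:
buf, state = StringIO(), {}
incremental_print("line1\nline2\nline3\n", state, "55_log_test", buf)
assert len(buf.getvalue().splitlines()) == 7 and state["55_log_test"] == 3

Keeping the per-log line counts in a dict owned by the caller is what lets a polling loop like stream_logs_until_completion revisit the same log files repeatedly without reprinting old lines.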


test_worker.py

Source: test_worker.py (GitHub)

...
@patch('pika.PlainCredentials')
@patch('pika.ConnectionParameters')
@patch('pika.BlockingConnection')
@patch('ammonite.callback.Sender', MockSender)
def test_stream_logs(a, b, c):
    print("starting stream log")
    container_name = "container"
    container_path = os.path.join(ROOT_DIR, "data")
    filename = os.path.join(container_path,
                            container_name,
                            "%s-json.log" % container_name)
    container = MockDockerContainer()
    config = MockConfig({'DOCKER': {'CONTAINER_PATH': container_path}})
    with LogCapture() as l:
        with open(filename, 'w') as fh:
            thread = Thread(target=stream_log,
                            args=(container_name, "2", container, config))
            thread.daemon = True
            thread.start()
...
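
The test above writes to a container's <name>-json.log file from the test thread while stream_log tails it from a background thread. A minimal sketch of such a tail-style follower is shown below; follow_json_log is a hypothetical helper written for illustration, not ammonite's stream_log implementation, and the idle timeout is an assumption.

import time


def follow_json_log(path: str, stop_after_idle: float = 5.0):
    # Yield lines appended to a container's <name>-json.log file until it has
    # been idle for `stop_after_idle` seconds, polling roughly every 100 ms.
    idle = 0.0
    with open(path, "r") as fh:
        while idle < stop_after_idle:
            line = fh.readline()
            if line:
                idle = 0.0
                yield line.rstrip("\n")
            else:
                time.sleep(0.1)
                idle += 0.1


# Usage sketch (path is hypothetical):
#   for line in follow_json_log("/var/lib/docker/containers/container/container-json.log"):
#       print(line)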


test_agent.py

Source: test_agent.py (GitHub)

...
    assert response == metrics_response()
# @pytest.mark.skip
# @pytest.mark.asyncio
# @pytest.mark.parametrize("expected", [metrics_response()])
# async def test_stream_logs(agent, expected):
#     response = await agent.stream_logs(stream=True)
#     assert response
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_join(agent, expected):
    agent.client.expected = expected
    response = await agent.join("1.2.3.4")
    assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_leave(agent, expected):
    agent.client.expected = expected
    response = await agent.leave()
    assert response.status == 200
...
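
The commented-out test_stream_logs above presumably needs a live agent to stream from. One way such a test could be revived is to await the call against a fake agent that returns canned log lines; the FakeAgent below and its output are assumptions for illustration, not the project's API.

import asyncio


class FakeAgent:
    # Hypothetical stand-in for the agent fixture: returns canned log lines
    # instead of streaming them over HTTP from a live cluster.
    async def stream_logs(self, stream: bool = True):
        await asyncio.sleep(0)  # simulate a network round trip
        return ["log line 1", "log line 2"]


async def main():
    response = await FakeAgent().stream_logs(stream=True)
    assert response  # mirrors the skipped test's `assert response`


asyncio.run(main())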


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

