How to use the assert_logs method in Testify

Best Python code snippets using Testify_python
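In the snippets below, assert_logs shows up in two forms: as a project-specific test helper that checks which log entries a piece of code produced, and as the cirq.testing.assert_logs context manager. Before diving into those examples, here is a minimal, framework-agnostic sketch of the same idea using the standard library's unittest.TestCase.assertLogs context manager (used purely for orientation; it is not a Testify or Testify_python API):

import logging
import unittest

class LogAssertionExample(unittest.TestCase):
    def test_warning_is_logged(self):
        # assertLogs captures records at the given level or above while the
        # block runs, and fails the test if nothing is captured.
        with self.assertLogs(level=logging.WARNING) as captured:
            logging.warning("orange apple fruit")
        self.assertEqual(len(captured.records), 1)
        self.assertIn("apple", captured.records[0].getMessage())

if __name__ == "__main__":
    unittest.main()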

test_logging.py

Source: test_logging.py (GitHub)


...
def _clbd(lbid, port):
    return CLBDescription(lb_id=lbid, port=port)
class LogStepsTests(SynchronousTestCase):
    """Tests for :func:`log_steps`."""
    def assert_logs(self, steps, intents):
        """Log some steps and ensure they result in the given Log intents."""
        sequence = SequenceDispatcher([(intent, noop) for intent in intents])
        with sequence.consume():
            sync_perform(test_dispatcher(sequence), log_steps(steps))
    def test_unhandled_steps(self):
        """
        Arbitrary unhandled steps return an effect that performs no logging.
        """
        steps = pbag([ConvergeLater([ErrorReason.String("foo")])])
        self.assert_logs(steps, [])
    def test_create_servers(self):
        """Logs :obj:`CreateServer`."""
        cfg = {'configgy': 'configged', 'nested': {'a': 'b'}}
        cfg2 = {'configgy': 'configged', 'nested': {'a': 'c'}}
        creates = pbag([
            CreateServer(server_config=freeze(cfg)),
            CreateServer(server_config=freeze(cfg)),
            CreateServer(server_config=freeze(cfg2))
        ])
        self.assert_logs(creates, [
            Log('convergence-create-servers',
                fields={'num_servers': 2, 'server_config': cfg,
                        'cloud_feed': True}),
            Log('convergence-create-servers',
                fields={'num_servers': 1, 'server_config': cfg2,
                        'cloud_feed': True})
        ])
    def test_delete_servers(self):
        """Logs :obj:`DeleteServer`."""
        deletes = pbag([DeleteServer(server_id='1'),
                        DeleteServer(server_id='2'),
                        DeleteServer(server_id='3')])
        self.assert_logs(deletes, [
            Log('convergence-delete-servers',
                fields={'servers': ['1', '2', '3'], 'cloud_feed': True})
        ])
    def test_add_nodes_to_clbs(self):
        """Logs :obj:`AddNodesToCLB`."""
        adds = pbag([
            AddNodesToCLB(
                lb_id='lbid1',
                address_configs=pset([('10.0.0.1', _clbd('lbid1', 1234))])),
            AddNodesToCLB(
                lb_id='lbid1',
                address_configs=pset([('10.0.0.2', _clbd('lbid1', 1235))])),
            AddNodesToCLB(
                lb_id='lbid2',
                address_configs=pset([('10.0.0.1', _clbd('lbid2', 4321))]))])
        self.assert_logs(adds, [
            Log('convergence-add-clb-nodes',
                fields={'lb_id': 'lbid1',
                        'addresses': ['10.0.0.1:1234', '10.0.0.2:1235'],
                        'cloud_feed': True}),
            Log('convergence-add-clb-nodes',
                fields={'lb_id': 'lbid2',
                        'addresses': ['10.0.0.1:4321'],
                        'cloud_feed': True})
        ])
    def test_remove_nodes_from_clbs(self):
        """Logs :obj:`RemoveNodesFromCLB`."""
        removes = pbag([
            RemoveNodesFromCLB(lb_id='lbid1', node_ids=pset(['a', 'b', 'c'])),
            RemoveNodesFromCLB(lb_id='lbid2', node_ids=pset(['d', 'e', 'f']))
        ])
        self.assert_logs(removes, [
            Log('convergence-remove-clb-nodes',
                fields={'lb_id': 'lbid1',
                        'nodes': ['a', 'b', 'c'],
                        'cloud_feed': True}),
            Log('convergence-remove-clb-nodes',
                fields={'lb_id': 'lbid2',
                        'nodes': ['d', 'e', 'f'],
                        'cloud_feed': True}),
        ])
    def test_change_clb_node(self):
        """Logs :obj:`ChangeCLBNode`."""
        changes = pbag([
            ChangeCLBNode(lb_id='lbid1', node_id='node1',
                          condition=CLBNodeCondition.DRAINING,
                          type=CLBNodeType.PRIMARY,
                          weight=50),
            ChangeCLBNode(lb_id='lbid1', node_id='node2',
                          condition=CLBNodeCondition.DRAINING,
                          type=CLBNodeType.PRIMARY,
                          weight=50),
            ChangeCLBNode(lb_id='lbid1', node_id='node3',
                          condition=CLBNodeCondition.ENABLED,
                          type=CLBNodeType.PRIMARY,
                          weight=50),
            ChangeCLBNode(lb_id='lbid2', node_id='node4',
                          condition=CLBNodeCondition.ENABLED,
                          type=CLBNodeType.PRIMARY,
                          weight=50),
        ])
        self.assert_logs(changes, [
            Log('convergence-change-clb-nodes',
                fields={
                    'lb_id': 'lbid1', 'nodes': ['node3'],
                    'type': 'PRIMARY', 'condition': 'ENABLED', 'weight': 50,
                    'cloud_feed': True
                }),
            Log('convergence-change-clb-nodes',
                fields={
                    'lb_id': 'lbid1', 'nodes': ['node1', 'node2'],
                    'type': 'PRIMARY', 'condition': 'DRAINING', 'weight': 50,
                    'cloud_feed': True
                }),
            Log('convergence-change-clb-nodes',
                fields={
                    'lb_id': 'lbid2', 'nodes': ['node4'],
                    'type': 'PRIMARY', 'condition': 'ENABLED', 'weight': 50,
                    'cloud_feed': True
                }),
        ])
    def test_bulk_add_to_rcv3(self):
        """Logs :obj:`BulkAddToRCv3`."""
        adds = pbag([
            BulkAddToRCv3(lb_node_pairs=pset([
                ('lb1', 'node1'), ('lb1', 'node2'),
                ('lb2', 'node2'), ('lb2', 'node3'),
                ('lb3', 'node4')])),
            BulkAddToRCv3(lb_node_pairs=pset([
                ('lba', 'nodea'), ('lba', 'nodeb'),
                ('lb1', 'nodea')]))
        ])
        self.assert_logs(adds, [
            Log('convergence-add-rcv3-nodes',
                fields={'lb_id': 'lb1', 'servers': ['node1', 'node2', 'nodea'],
                        'cloud_feed': True}),
            Log('convergence-add-rcv3-nodes',
                fields={'lb_id': 'lb2', 'servers': ['node2', 'node3'],
                        'cloud_feed': True}),
            Log('convergence-add-rcv3-nodes',
                fields={'lb_id': 'lb3', 'servers': ['node4'],
                        'cloud_feed': True}),
            Log('convergence-add-rcv3-nodes',
                fields={'lb_id': 'lba', 'servers': ['nodea', 'nodeb'],
                        'cloud_feed': True})
        ])
    def test_bulk_remove_from_rcv3(self):
        """Logs :obj:`BulkRemoveFromRCv3`."""
        adds = pbag([
            BulkRemoveFromRCv3(lb_node_pairs=pset([
                ('lb1', 'node1'), ('lb1', 'node2'),
                ('lb2', 'node2'), ('lb2', 'node3'),
                ('lb3', 'node4')])),
            BulkRemoveFromRCv3(lb_node_pairs=pset([
                ('lba', 'nodea'), ('lba', 'nodeb'),
                ('lb1', 'nodea')]))
        ])
        self.assert_logs(adds, [
            Log('convergence-remove-rcv3-nodes',
                fields={'lb_id': 'lb1', 'servers': ['node1', 'node2', 'nodea'],
                        'cloud_feed': True}),
            Log('convergence-remove-rcv3-nodes',
                fields={'lb_id': 'lb2', 'servers': ['node2', 'node3'],
                        'cloud_feed': True}),
            Log('convergence-remove-rcv3-nodes',
                fields={'lb_id': 'lb3', 'servers': ['node4'],
                        'cloud_feed': True}),
            Log('convergence-remove-rcv3-nodes',
                fields={'lb_id': 'lba', 'servers': ['nodea', 'nodeb'],
                        'cloud_feed': True})
        ])
    def test_set_metadata_item_on_server(self):
        """Logs :obj:`SetMetadataItemOnServer`."""
        sets = pbag([
            SetMetadataItemOnServer(server_id='s1', key='k1', value='v1'),
            SetMetadataItemOnServer(server_id='s2', key='k1', value='v1'),
            SetMetadataItemOnServer(server_id='s3', key='k2', value='v2'),
        ])
        self.assert_logs(sets, [
            Log('convergence-set-server-metadata',
                fields={'servers': ['s1', 's2'], 'key': 'k1', 'value': 'v1',
                        'cloud_feed': True}),
            Log('convergence-set-server-metadata',
                fields={'servers': ['s3'], 'key': 'k2', 'value': 'v2',
                        'cloud_feed': True})
...
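The assert_logs helper above is built on the effect library: SequenceDispatcher pins down the exact Log intents that are expected, sequence.consume() fails the test if any of them were not performed, and sync_perform runs the effect returned by log_steps synchronously. A new test reusing the helper only needs a bag of steps and the matching Log intents; a hypothetical extra case (not part of the original suite, reusing CreateServer, Log, pbag and freeze from the snippet's imports) might look like this:

    def test_create_single_server(self):
        """Hypothetical extra case: one CreateServer step logs one intent."""
        cfg = {'flavor': 'small'}
        steps = pbag([CreateServer(server_config=freeze(cfg))])
        self.assert_logs(steps, [
            Log('convergence-create-servers',
                fields={'num_servers': 1, 'server_config': cfg,
                        'cloud_feed': True})
        ])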


test_qibo_benchmarks.py

Source: test_qibo_benchmarks.py (GitHub)


import pytest
from benchmarks.scripts import circuit_benchmark, evolution_benchmark
def assert_logs(logs, nqubits, backend, nreps=1):
    assert logs[-1]["nqubits"] == nqubits
    assert logs[-1]["backend"] == backend
    assert logs[-1]["simulation_times_mean"] >= 0
    assert logs[-1]["transfer_times_mean"] >= 0
    assert len(logs[-1]["simulation_times"]) == nreps
    assert len(logs[-1]["transfer_times"]) == nreps
@pytest.mark.parametrize("nreps", [1, 5])
@pytest.mark.parametrize("nlayers", ["1", "4"])
@pytest.mark.parametrize("gate", ["H", "X", "Y", "Z"])
def test_one_qubit_gate_benchmark(nqubits, backend, transfer, nreps,
                                  nlayers, gate):
    logs = circuit_benchmark(nqubits, backend, circuit_name="one-qubit-gate",
                             nreps=nreps, transfer=transfer,
                             circuit_options=f"gate={gate},nlayers={nlayers}")
    assert_logs(logs, nqubits, backend, nreps)
    target_options = f"nqubits={nqubits}, nlayers={nlayers}, "
    target_options += f"gate={gate}, params={{}}"
    assert logs[-1]["circuit"] == "one-qubit-gate"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("gate,params",
                         [("RX", "theta=0.1"), ("RZ", "theta=0.2"),
                          ("U1", "theta=0.3"), ("U2", "phi=0.2,lam=0.3"),
                          ("U3", "theta=0.1,phi=0.2,lam=0.3")])
def test_one_qubit_gate_param_benchmark(nqubits, backend, gate, params):
    logs = circuit_benchmark(nqubits, backend, circuit_name="one-qubit-gate",
                             circuit_options=f"gate={gate},{params}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, nlayers=1, gate={gate}"
    paramdict = {}
    for param in params.split(","):
        k, v = param.split("=")
        paramdict[k] = v
    target_options = f"{target_options}, params={paramdict}"
    assert logs[-1]["circuit"] == "one-qubit-gate"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("nreps", [1, 5])
@pytest.mark.parametrize("nlayers", ["1", "4"])
@pytest.mark.parametrize("gate", ["CNOT", "SWAP", "CZ"])
def test_two_qubit_gate_benchmark(nqubits, backend, transfer, nreps,
                                  nlayers, gate):
    logs = circuit_benchmark(nqubits, backend, circuit_name="two-qubit-gate",
                             nreps=nreps, transfer=transfer,
                             circuit_options=f"gate={gate},nlayers={nlayers}")
    assert_logs(logs, nqubits, backend, nreps)
    target_options = f"nqubits={nqubits}, nlayers={nlayers}, "
    target_options += f"gate={gate}, params={{}}"
    assert logs[-1]["circuit"] == "two-qubit-gate"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("gate,params",
                         [("CRX", "theta=0.1"), ("CRZ", "theta=0.2"),
                          ("CU1", "theta=0.3"), ("CU2", "phi=0.2,lam=0.3"),
                          ("CU3", "theta=0.1,phi=0.2,lam=0.3"),
                          ("fSim", "theta=0.1,phi=0.2")])
def test_two_qubit_gate_param_benchmark(nqubits, backend, gate, params):
    logs = circuit_benchmark(nqubits, backend, circuit_name="two-qubit-gate",
                             circuit_options=f"gate={gate},{params}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, nlayers=1, gate={gate}"
    paramdict = {}
    for param in params.split(","):
        k, v = param.split("=")
        paramdict[k] = v
    target_options = f"{target_options}, params={paramdict}"
    assert logs[-1]["circuit"] == "two-qubit-gate"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("nreps", [1, 5])
@pytest.mark.parametrize("swaps", [False, True])
def test_qft_benchmark(nqubits, backend, transfer, nreps, swaps):
    logs = circuit_benchmark(nqubits, backend, circuit_name="qft",
                             nreps=nreps, transfer=transfer,
                             circuit_options=f"swaps={swaps}")
    assert_logs(logs, nqubits, backend, nreps)
    target_options = f"nqubits={nqubits}, swaps={swaps}"
    assert logs[-1]["circuit"] == "qft"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("varlayer", [False, True])
def test_variational_benchmark(nqubits, backend, varlayer):
    logs = circuit_benchmark(nqubits, backend, circuit_name="variational",
                             circuit_options=f"varlayer={varlayer}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, nlayers=1, seed=123, varlayer={varlayer}"
    assert logs[-1]["circuit"] == "variational"
    assert logs[-1]["circuit_options"] == target_options
def test_bernstein_vazirani_benchmark(nqubits, backend):
    logs = circuit_benchmark(nqubits, backend, circuit_name="bv")
    assert_logs(logs, nqubits, backend)
    assert logs[-1]["circuit"] == "bv"
    assert logs[-1]["circuit_options"] == f"nqubits={nqubits}"
@pytest.mark.parametrize("random", [True, False])
def test_hidden_shift_benchmark(nqubits, backend, random):
    shift = "" if random else nqubits * "0"
    logs = circuit_benchmark(nqubits, backend, circuit_name="hs",
                             circuit_options=f"shift={shift}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, shift={shift}"
    assert logs[-1]["circuit"] == "hs"
    assert logs[-1]["circuit_options"] == target_options
def test_qaoa_benchmark(backend):
    logs = circuit_benchmark(4, backend, circuit_name="qaoa")
    assert_logs(logs, 4, backend)
    target_options = f"nqubits=4, nparams=2, graph=, seed=123"
    assert logs[-1]["circuit"] == "qaoa"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("depth", ["2", "5", "10"])
def test_supremacy_benchmark(nqubits, backend, depth):
    logs = circuit_benchmark(nqubits, backend, circuit_name="supremacy",
                             circuit_options=f"depth={depth}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, depth={depth}, seed=123"
    assert logs[-1]["circuit"] == "supremacy"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("simtime", ["1", "2.5"])
def test_basis_change_benchmark(nqubits, backend, simtime):
    logs = circuit_benchmark(nqubits, backend, circuit_name="bc",
                             circuit_options=f"simulation_time={simtime}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, simulation_time={simtime}, seed=123"
    assert logs[-1]["circuit"] == "bc"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("depth", ["2", "5", "8"])
def test_quantum_volume_benchmark(nqubits, backend, depth):
    logs = circuit_benchmark(nqubits, backend, circuit_name="qv",
                             circuit_options=f"depth={depth}")
    assert_logs(logs, nqubits, backend)
    target_options = f"nqubits={nqubits}, depth={depth}, seed=123"
    assert logs[-1]["circuit"] == "qv"
    assert logs[-1]["circuit_options"] == target_options
@pytest.mark.parametrize("dt", [0.01, 0.05, 0.1])
@pytest.mark.parametrize("dense", [False, True])
def test_adiabatic_evolution_benchmark(nqubits, dt, backend, dense, solver="exp"):
    logs = evolution_benchmark(nqubits, dt, solver, backend, dense=dense)
    assert logs[-1]["nqubits"] == nqubits
    assert logs[-1]["dt"] == dt
    assert logs[-1]["backend"] == backend
...
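In this suite, assert_logs is just a module-level helper that inspects the last entry of the benchmark's log list. It can be exercised on its own with a hand-built entry; the dictionary below is fabricated purely to illustrate the keys the helper checks:

# Fabricated example entry mirroring the fields checked by assert_logs.
fake_logs = [{
    "nqubits": 4,
    "backend": "numpy",
    "simulation_times_mean": 0.12,
    "transfer_times_mean": 0.0,
    "simulation_times": [0.12],
    "transfer_times": [0.0],
}]
assert_logs(fake_logs, nqubits=4, backend="numpy", nreps=1)  # passes silently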


logs_test.py

Source: logs_test.py (GitHub)


...
import warnings
import pytest
import cirq.testing
def test_assert_logs_valid_single_logs():
    with cirq.testing.assert_logs('apple'):
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'orange'):
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs():
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'fruit'):
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple') as logs:
        logging.error('orange apple fruit')
    assert len(logs) == 1
    assert logs[0].getMessage() == 'orange apple fruit'
    assert logs[0].levelno == logging.ERROR
    with cirq.testing.assert_logs('apple'):
        warnings.warn('orange apple fruit')
def test_assert_logs_invalid_single_logs():
    match = (
        '^dog expected to appear in log messages but it was not found. '
        'Log messages: \\[\'orange apple fruit\'\\].$'
    )
    with pytest.raises(AssertionError, match=match):
        with cirq.testing.assert_logs('dog'):
            logging.error('orange apple fruit')
    with pytest.raises(AssertionError, match='dog'):
        with cirq.testing.assert_logs('dog', 'cat'):
            logging.error('orange apple fruit')
def test_assert_logs_valid_multiple_logs():
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('orange apple fruit')
        logging.error('other')
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'other', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', count=3):
        logging.error('orange apple fruit')
        logging.error('other')
        logging.warning('other two')
def test_assert_logs_invalid_multiple_logs():
    with pytest.raises(AssertionError, match='^Expected 1 log message but got 2. Log messages.*$'):
        with cirq.testing.assert_logs('dog'):
            logging.error('orange apple fruit')
            logging.error('dog')
    with pytest.raises(AssertionError, match='^Expected 2 log message but got 3. Log messages.*$'):
        with cirq.testing.assert_logs('dog', count=2):
            logging.error('orange apple fruit')
            logging.error('other')
            logging.error('dog')
    match = (
        '^dog expected to appear in log messages but it was not found. '
        'Log messages: \\[\'orange\', \'other\', \'whatever\'\\].$'
    )
    with pytest.raises(AssertionError, match=match):
        with cirq.testing.assert_logs('dog', count=3):
            logging.error('orange')
            logging.error('other')
            logging.error('whatever')
def test_assert_logs_log_level():
    # Default minlevel is WARNING, max level CRITICAL
    with cirq.testing.assert_logs('apple'):
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    with cirq.testing.assert_logs('apple', 'critical', count=2):
        logging.critical('critical')
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    with cirq.testing.assert_logs('apple', min_level=logging.INFO, count=2):
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    with cirq.testing.assert_logs('info only 1', min_level=logging.INFO, max_level=logging.INFO):
        with cirq.testing.assert_logs(
            'info warning 1', min_level=logging.WARNING, max_level=logging.WARNING
        ):
            logging.info("info only 1")
            logging.warning("info warning 1")
def test_invalid_levels():
    with pytest.raises(ValueError, match="min_level.*max_level"):
        with cirq.testing.assert_logs(
            "test", min_level=logging.CRITICAL, max_level=logging.WARNING
        ):
            pass
def test_assert_logs_warnings():
    # Capture all warnings in one context, so that test cases that will
    # display a warning do not do so when the test is run.
    with warnings.catch_warnings(record=True):
        with cirq.testing.assert_logs('apple'):
            warnings.warn('orange apple fruit')
        with cirq.testing.assert_logs('apple', count=2):
            warnings.warn('orange apple fruit')
            logging.error('other')
        with cirq.testing.assert_logs('apple', capture_warnings=False):
            logging.error('orange apple fruit')
            warnings.warn('warn')
        with pytest.raises(
            AssertionError, match='^Expected 1 log message but got 0. Log messages.*$'
        ):
            with cirq.testing.assert_logs('apple', capture_warnings=False):
...
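cirq.testing.assert_logs packages the same idea as a reusable context manager: it captures records between min_level and max_level (WARNING through CRITICAL by default), checks that each given substring appears somewhere in the captured messages, and enforces an expected record count (one by default). A minimal standalone usage, based on the behaviour exercised above, could look like this:

import logging
import cirq.testing

def test_single_error_is_captured():
    # One ERROR record containing 'apple' is expected; the DEBUG record
    # falls below the default WARNING threshold and is not captured.
    with cirq.testing.assert_logs('apple') as records:
        logging.error('orange apple fruit')
        logging.debug('ignored')
    assert records[0].levelno == logging.ERROR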


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Testify automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing for free!

Next-Gen App & Browser Testing Cloud
