How to use the get_run_ids method in stestr

Best Python code snippet using stestr_python

test_queued_run_coordinator_daemon.py

Source:test_queued_run_coordinator_daemon.py Github

copy

Full Screen

# Reconstructed from an extraction-garbled snippet: the original file's line
# numbers were fused into the code text. Formatting restored; tokens unchanged.
#
# NOTE(review): the excerpt begins mid-definition — the fragment below is the
# tail of a helper (presumably a create_run wrapper) whose def line falls
# before this excerpt; kept commented rather than guessed at:
#         pipeline_code_origin=pipeline_handle.get_python_origin(),
#         pipeline_name="foo",
#         **kwargs,
#     )


def get_run_ids(runs_queue):
    """Return the run_id of every run in *runs_queue*, preserving order."""
    return [run.run_id for run in runs_queue]


def test_attempt_to_launch_runs_filter(instance, workspace, daemon):
    """Only QUEUED runs are launched by a daemon iteration."""
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="non-queued-run",
        status=PipelineRunStatus.NOT_STARTED,
    )
    list(daemon.run_iteration(instance, workspace))
    assert get_run_ids(instance.run_launcher.queue()) == ["queued-run"]


def test_attempt_to_launch_runs_no_queued(instance, workspace, daemon):
    """Nothing is launched when no run is in the QUEUED state."""
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.STARTED,
    )
    create_run(
        instance,
        run_id="non-queued-run",
        status=PipelineRunStatus.NOT_STARTED,
    )
    list(daemon.run_iteration(instance, workspace))
    assert instance.run_launcher.queue() == []


@pytest.mark.parametrize(
    "num_in_progress_runs",
    range(6),
)
def test_get_queued_runs_max_runs(num_in_progress_runs, workspace, daemon):
    """Launch count is capped by max_concurrent_runs minus in-progress runs."""
    max_runs = 4
    with instance_for_queued_run_coordinator(max_concurrent_runs=max_runs) as instance:
        # fill run store with ongoing runs
        in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(num_in_progress_runs)]
        for i, run_id in enumerate(in_progress_run_ids):
            # get a selection of all in progress statuses
            status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
            create_run(
                instance,
                run_id=run_id,
                status=status,
            )
        # add more queued runs than should be launched
        queued_run_ids = ["queued-run-{}".format(i) for i in range(max_runs + 1)]
        for run_id in queued_run_ids:
            create_run(
                instance,
                run_id=run_id,
                status=PipelineRunStatus.QUEUED,
            )
        list(daemon.run_iteration(instance, workspace))
        assert len(instance.run_launcher.queue()) == max(0, max_runs - num_in_progress_runs)


def test_disable_max_concurrent_runs_limit(workspace, daemon):
    """max_concurrent_runs=-1 disables the concurrency cap entirely."""
    with instance_for_queued_run_coordinator(max_concurrent_runs=-1) as instance:
        # create ongoing runs
        in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(5)]
        for i, run_id in enumerate(in_progress_run_ids):
            # get a selection of all in progress statuses
            status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
            create_run(
                instance,
                run_id=run_id,
                status=status,
            )
        # add more queued runs
        queued_run_ids = ["queued-run-{}".format(i) for i in range(6)]
        for run_id in queued_run_ids:
            create_run(
                instance,
                run_id=run_id,
                status=PipelineRunStatus.QUEUED,
            )
        list(daemon.run_iteration(instance, workspace))
        assert len(instance.run_launcher.queue()) == 6


def test_priority(instance, workspace, daemon):
    """Runs are dequeued in descending PRIORITY_TAG order (default between)."""
    create_run(instance, run_id="default-pri-run", status=PipelineRunStatus.QUEUED)
    create_run(
        instance,
        run_id="low-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "-1"},
    )
    create_run(
        instance,
        run_id="hi-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "3"},
    )
    list(daemon.run_iteration(instance, workspace))
    assert get_run_ids(instance.run_launcher.queue()) == [
        "hi-pri-run",
        "default-pri-run",
        "low-pri-run",
    ]


def test_priority_on_malformed_tag(instance, workspace, daemon):
    """A non-numeric PRIORITY_TAG does not prevent the run from launching."""
    create_run(
        instance,
        run_id="bad-pri-run",
        status=PipelineRunStatus.QUEUED,
        tags={PRIORITY_TAG: "foobar"},
    )
    list(daemon.run_iteration(instance, workspace))
    assert get_run_ids(instance.run_launcher.queue()) == ["bad-pri-run"]


def test_tag_limits(workspace, daemon):
    """A key+value concurrency limit caps only runs carrying that exact tag."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[{"key": "database", "value": "tiny", "limit": 1}],
    ) as instance:
        create_run(
            instance,
            run_id="tiny-1",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny"},
        )
        create_run(
            instance,
            run_id="tiny-2",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny"},
        )
        create_run(
            instance,
            run_id="large-1",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "large"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "large-1"]


def test_tag_limits_just_key(workspace, daemon):
    """With applyLimitPerUniqueValue False, the limit applies across all values of the key."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "database", "value": {"applyLimitPerUniqueValue": False}, "limit": 2}
        ],
    ) as instance:
        create_run(
            instance,
            run_id="tiny-1",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny"},
        )
        create_run(
            instance,
            run_id="tiny-2",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny"},
        )
        create_run(
            instance,
            run_id="large-1",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "large"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "tiny-2"]


def test_multiple_tag_limits(workspace, daemon):
    """A run counts against every limit whose tag it carries."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "database", "value": "tiny", "limit": 1},
            {"key": "user", "value": "johann", "limit": 2},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny", "user": "johann"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"database": "tiny"},
        )
        create_run(
            instance,
            run_id="run-3",
            status=PipelineRunStatus.QUEUED,
            tags={"user": "johann"},
        )
        create_run(
            instance,
            run_id="run-4",
            status=PipelineRunStatus.QUEUED,
            tags={"user": "johann"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]


def test_overlapping_tag_limits(workspace, daemon):
    """Overlapping key-only and key+value limits are both enforced."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "foo", "limit": 2},
            {"key": "foo", "value": "bar", "limit": 1},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-3",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other"},
        )
        create_run(
            instance,
            run_id="run-4",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]


def test_limits_per_unique_value(workspace, daemon):
    """applyLimitPerUniqueValue True caps each distinct tag value independently."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "foo", "limit": 1, "value": {"applyLimitPerUniqueValue": True}},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        list(daemon.run_iteration(instance, workspace))
        create_run(
            instance,
            run_id="run-3",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other"},
        )
        create_run(
            instance,
            run_id="run-4",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]


def test_limits_per_unique_value_overlapping_limits(workspace, daemon):
    """Per-unique-value limits compose correctly with overlapping limits."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "foo", "limit": 1, "value": {"applyLimitPerUniqueValue": True}},
            {"key": "foo", "limit": 2},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-3",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other"},
        )
        create_run(
            instance,
            run_id="run-4",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "other-2"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=10,
        tag_concurrency_limits=[
            {"key": "foo", "limit": 2, "value": {"applyLimitPerUniqueValue": True}},
            {"key": "foo", "limit": 1, "value": "bar"},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "baz"},
        )
        create_run(
            instance,
            run_id="run-3",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "bar"},
        )
        create_run(
            instance,
            run_id="run-4",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "baz"},
        )
        create_run(
            instance,
            run_id="run-5",
            status=PipelineRunStatus.QUEUED,
            tags={"foo": "baz"},
        )
        list(daemon.run_iteration(instance, workspace))
        assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-2", "run-4"]


def test_locations_not_created(instance, monkeypatch, workspace, daemon):
    """
    Verifies that no repository location is created when runs are dequeued
    """
    create_run(
        instance,
        run_id="queued-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="queued-run-2",
        status=PipelineRunStatus.QUEUED,
    )
    original_method = GrpcServerRepositoryLocation.__init__
    method_calls = []

    # Spy wrapper: record every construction attempt, then delegate to the
    # real __init__ so behavior is unchanged if it is ever called.
    def mocked_location_init(
        self,
        origin,
        host=None,
        port=None,
        socket=None,
        server_id=None,
        heartbeat=False,
        watch_server=True,
        grpc_server_registry=None,
    ):
        method_calls.append(origin)
        return original_method(
            self,
            origin,
            host,
            port,
            socket,
            server_id,
            heartbeat,
            watch_server,
            grpc_server_registry,
        )

    monkeypatch.setattr(
        GrpcServerRepositoryLocation,
        "__init__",
        mocked_location_init,
    )
    list(daemon.run_iteration(instance, workspace))
    assert get_run_ids(instance.run_launcher.queue()) == ["queued-run", "queued-run-2"]
    assert len(method_calls) == 0


def test_skip_error_runs(instance, workspace, daemon):
    """A run that errors on launch is marked FAILURE and does not block others."""
    create_run(
        instance,
        run_id="bad-run",
        status=PipelineRunStatus.QUEUED,
    )
    create_run(
        instance,
        run_id="good-run",
        status=PipelineRunStatus.QUEUED,
    )
    errors = [error for error in list(daemon.run_iteration(instance, workspace)) if error]
    assert len(errors) == 1
    assert "Bad run bad-run" in errors[0].message
    assert get_run_ids(instance.run_launcher.queue()) == ["good-run"]
    assert instance.get_run_by_id("bad-run").status == PipelineRunStatus.FAILURE


def test_key_limit_with_extra_tags(workspace, daemon):
    """A key-only limit applies even when runs carry additional unrelated tags."""
    with instance_for_queued_run_coordinator(
        max_concurrent_runs=2,
        tag_concurrency_limits=[
            {"key": "test", "limit": 1},
        ],
    ) as instance:
        create_run(
            instance,
            run_id="run-1",
            status=PipelineRunStatus.QUEUED,
            tags={"other-tag": "value", "test": "value"},
        )
        create_run(
            instance,
            run_id="run-2",
            status=PipelineRunStatus.QUEUED,
            tags={"other-tag": "value", "test": "value"},
        )
        list(daemon.run_iteration(instance, workspace))
        # NOTE(review): the excerpt is truncated here — the final assertion of
        # this test is missing from the source.

Full Screen

Full Screen

main.py

Source:main.py Github

copy

Full Screen

# Reconstructed from an extraction-garbled snippet: the original file's line
# numbers were fused into the code text. Formatting restored; tokens unchanged.
#
# NOTE(review): the excerpt begins mid-definition — the fragment below is the
# tail of a handler whose def line falls before this excerpt (presumably the
# nucleotide counterpart of the handlers below — confirm against the repo);
# kept commented rather than guessed at:
#             table.upload_teardown()
#         return
#     start_byte = event['start_byte']
#     end_byte = event['end_byte']
#     run_ids = list(nucleotide_index.get_run_ids(start_byte, end_byte))
#     nucleotide_batch = NucleotideBatch(run_ids, start_byte)
#     nucleotide_batch.process()
#     return


def handler_protein(event, context):
    """Lambda entry point: process the protein runs in [start_byte, end_byte).

    A truthy ``event['clear']`` instead tears down all output tables.
    """
    if (event.get('clear', False)):
        print('resetting tables and data')
        # An empty batch is constructed only to reach its table objects.
        for table in ProteinBatch([], 0).tables.values():
            table.upload_teardown()
        return
    start_byte = event['start_byte']
    end_byte = event['end_byte']
    run_ids = list(protein_index.get_run_ids(start_byte, end_byte))
    protein_batch = ProteinBatch(run_ids, start_byte)
    protein_batch.process()
    return


def handler_rdrp(event, context):
    """Lambda entry point: process the RdRp runs in [start_byte, end_byte).

    A truthy ``event['clear']`` instead tears down all output tables.
    """
    if (event.get('clear', False)):
        print('resetting tables and data')
        # An empty batch is constructed only to reach its table objects.
        for table in RdrpBatch([], 0).tables.values():
            table.upload_teardown()
        return
    start_byte = event['start_byte']
    end_byte = event['end_byte']
    run_ids = list(rdrp_index.get_run_ids(start_byte, end_byte))
    rdrp_batch = RdrpBatch(run_ids, start_byte)
    rdrp_batch.process()
    # NOTE(review): the excerpt is truncated immediately after process() — a
    # trailing `return` (as in the sibling handlers) may follow in the source.

Full Screen

Full Screen

prepare_tempest_testrepository.py

Source:prepare_tempest_testrepository.py Github

copy

Full Screen

# Reconstructed from an extraction-garbled snippet: the original file's line
# numbers were fused into the code text. Formatting restored.
#
# NOTE(review): the excerpt begins mid-statement — the `if` guarding these
# argv reads (presumably a len(sys.argv) check) falls before this excerpt;
# kept commented rather than guessed at:
#     TEMPEST_PATH = sys.argv[1]
#     DB_URI = sys.argv[2]
# else:
#     TEMPEST_PATH = '/opt/stack/new/tempest'


def get_run_ids(session):
    """Return the ids of the 10 most recent runs that had zero failures."""
    # TODO(mtreinish): Move this function into the subunit2sql db api
    results = db_utils.model_query(models.Run, session).order_by(
        models.Run.run_at.desc()).filter_by(fails=0).limit(10).all()
    # Return a list instead of a lazy map(): in Python 3 a map object is a
    # one-shot iterator, and callers consume it after the session is closed.
    # A list is re-iterable and fully materialized up front.
    return [result.id for result in results]


def main():
    """Dump the latest passing runs as subunit preseed streams for tempest."""
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')
    session = api.get_session()
    run_ids = get_run_ids(session)
    session.close()
    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    os.mkdir(preseed_path)
    for run in run_ids:
        with open(os.path.join(preseed_path, run + '.subunit'), 'w') as fd:
            write_subunit.sql2subunit(run, fd)


# NOTE(review): the excerpt is truncated here — the body of this guard
# (presumably `main()`) is missing from the source:
# if __name__ == '__main__':

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run stestr automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful