How to use the stop_instances method in LocalStack

Best Python code snippet using the localstack_python library

test_update_pinned_instances.py

Source: test_update_pinned_instances.py (GitHub)

copy

Full Screen

"""Integration tests for job updates pinned to specific instances.

Each test drives the aurorabridge `client` fixture through a sequence of
job updates / instance kills and asserts, for every RUNNING task, both its
run id (the trailing component of "<job>-<instance>-<run>") and the
metadata key/value pairs carried by its job config version.
"""
import pytest

from tests.integration.aurorabridge_test.client import api
from tests.integration.aurorabridge_test.util import (
    get_job_update_request,
    wait_for_killed,
    wait_for_rolled_forward,
)

pytestmark = [pytest.mark.default, pytest.mark.aurorabridge]

# Metadata carried by the original (v1) job config and by the relabeled
# (v2, "diff_labels") job config.
_V1_METADATA = {"test_key_1": "test_value_1", "test_key_2": "test_value_2"}
_V2_METADATA = {"test_key_11": "test_value_11", "test_key_22": "test_value_22"}


def _run_id(task):
    """Return the run id component of a task id ("<job>-<instance>-<run>")."""
    _, _, run_id = task.assignedTask.taskId.rsplit("-", 2)
    return run_id


def _assert_task_metadata(task, expected, suffix=""):
    """Assert the task carries exactly the expected metadata key/value pairs.

    `suffix` is appended to the failure message to preserve the original
    per-branch messages (e.g. " for affected instances").
    """
    assert len(task.assignedTask.task.metadata) == len(expected)
    for m in task.assignedTask.task.metadata:
        assert expected.get(m.key) == m.value, (
            "unexpected metadata %s%s" % (m, suffix)
        )


def _get_running_tasks(client, job_key):
    """Query and return the currently RUNNING tasks for the given job key."""
    res = client.get_tasks_without_configs(
        api.TaskQuery(jobKeys={job_key}, statuses={api.ScheduleStatus.RUNNING})
    )
    return res.tasks


def _assert_instance_events_within(client, update_key, instance_ids):
    """Assert the update produced events, and only for the given instances."""
    res = client.get_job_update_details(
        None, api.JobUpdateQuery(key=update_key)
    )
    assert len(res.detailsList) == 1
    assert len(res.detailsList[0].instanceEvents) > 0
    for ie in res.detailsList[0].instanceEvents:
        assert ie.instanceId in instance_ids


def _pinned_update_request(yaml_file, instances):
    """Build an update request restricted to the given instance ids."""
    req = get_job_update_request(yaml_file)
    req.settings.updateOnlyTheseInstances = {
        api.Range(first=i, last=i) for i in instances
    }
    return req


def test__update_with_pinned_instances(client):
    """
    test basic pinned instance deployment:
    1. start a regular update (version 1) on all instances
    2. start another update (version 2) targeting subset of instances,
       expect only targeted instances to be updated
    3. start regular update (version 1) again on all instances, expect
       only instances affected by previous step to be updated
    """
    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 10
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # start a update with updateOnlyTheseInstances parameter
    update_instances = [0, 2, 3, 7, 9]
    pinned_req = _pinned_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml", update_instances
    )
    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 10
    for t in tasks:
        if t.assignedTask.instanceId in update_instances:
            assert _run_id(t) == "2"
            _assert_task_metadata(t, _V2_METADATA, " for affected instances")
        else:
            assert _run_id(t) == "1"
            _assert_task_metadata(t, _V1_METADATA, " for unaffected instances")

    # start a regular update again should affect instances updated in
    # previous request
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml"),
        "start job update test/dc/labrat_large_job again (with executor data order diff)",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 10
    for t in tasks:
        _assert_task_metadata(t, _V1_METADATA)
        if t.assignedTask.instanceId in update_instances:
            assert _run_id(t) == "3"
        else:
            assert _run_id(t) == "1"


def test__update_with_pinned_instances__add_remove_instance(client):
    """
    test pinned instance deployment with add / remove instances:
    1. start a regular update (version 1) on all instances
    2. start another update (version 2) targeting subset of instances,
       while adding instances, expect only add and targeted instances
       to be updated
    3. start regular update (version 1) again on all instances, while
       removing instances, expect only instances affected by previous
       step to be updated and additional instances removed
    """
    all_instances = set(range(8))

    # start a regular update
    req = get_job_update_request("test_dc_labrat_large_job.yaml")
    req.instanceCount = 8
    res = client.start_job_update(
        req, "start job update test/dc/labrat_large_job"
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 8
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # start a update with updateOnlyTheseInstances parameter,
    # and add instances (8, 9 are beyond the original instance count)
    update_instances = {0, 2, 3, 8, 9}
    pinned_req = _pinned_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml", update_instances
    )
    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 10
    for t in tasks:
        if t.assignedTask.instanceId in update_instances:
            # pre-existing targeted instances restart (run 2); newly added
            # instances come up on their first run.
            if t.assignedTask.instanceId in all_instances:
                assert _run_id(t) == "2"
            else:
                assert _run_id(t) == "1"
            _assert_task_metadata(t, _V2_METADATA, " for affected instances")
        else:
            assert _run_id(t) == "1"
            _assert_task_metadata(t, _V1_METADATA, " for unaffected instances")

    # start a regular update again should affect instances updated in
    # previous request, and remove instances
    req = get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml")
    req.instanceCount = 8
    res = client.start_job_update(
        req,
        "start job update test/dc/labrat_large_job again (with executor data order diff)",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 8
    for t in tasks:
        _assert_task_metadata(t, _V1_METADATA)
        if t.assignedTask.instanceId in (update_instances & all_instances):
            assert _run_id(t) == "3"
        else:
            assert _run_id(t) == "1"


def test__update_with_pinned_instances__stopped_instances(client):
    """
    test pinned instance deployment with stopped instances:
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start another update (version 2) targeting subset of instances
       (stopped instances not included), expect only targeted instances
       to be updated and stopped instances remain stopped
    """
    all_instances = set(range(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # stop subset of instances
    stop_instances = {1, 6}
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 1, 6 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances - stop_instances)
    for t in tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start a update with updateOnlyTheseInstances parameter
    update_instances = {0, 2, 3, 7, 9}
    pinned_req = _pinned_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml", update_instances
    )
    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances - stop_instances)
    # expect instance 0, 2, 3, 7, 9 to be updated to newer version, with run id 2
    # expect instance 1, 6 remain at stopped
    # expect instance 4, 5, 8 remain at original version, with run id 1
    for t in tasks:
        if t.assignedTask.instanceId in update_instances:
            assert _run_id(t) == "2"
            _assert_task_metadata(t, _V2_METADATA, " for affected instances")
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert _run_id(t) == "1"
            _assert_task_metadata(t, _V1_METADATA, " for unaffected instances")
        else:
            assert False, (
                "unexpected instance id %s: should be stopped"
                % t.assignedTask.instanceId
            )


def test__update_with_pinned_instances__start_stopped_instances(client):
    """
    test pinned instance deployment with stop / start instances:
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start the same update (version 1) targeting subset of instances
       (stopped instances included), expect only stopped instances
       to start running, others unaffected
    4. start regular update (version 1) again on all instances, expect
       no change on all instances
    """
    all_instances = set(range(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # stop subset of instances
    stop_instances = {2, 8}
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 2, 8 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances - stop_instances)
    for t in tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start a update with updateOnlyTheseInstances parameter
    # expect stopped instances to be started, others unchanged
    update_instances = {2, 3, 5, 8}
    pinned_req = _pinned_update_request(
        "test_dc_labrat_large_job_diff_executor.yaml", update_instances
    )
    res = client.start_job_update(
        pinned_req,
        "start job update test/dc/labrat_large_job with pinned instances",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    # only the stopped subset of the targeted instances produce events
    _assert_instance_events_within(
        client, res.key, update_instances & stop_instances
    )

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        if t.assignedTask.instanceId in stop_instances:
            assert _run_id(t) == "2"
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert _run_id(t) == "1"
        else:
            assert False, (
                "unexpected instance id %s" % t.assignedTask.instanceId
            )
        _assert_task_metadata(t, _V1_METADATA, " for affected instances")

    # start the regular update again same as the first one
    # expect no change for all instances
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start third job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    # identical config: the update must not touch any instance
    assert len(res.detailsList[0].instanceEvents) == 0

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        if t.assignedTask.instanceId in stop_instances:
            assert _run_id(t) == "2"
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert _run_id(t) == "1"
        else:
            assert False, (
                "unexpected instance id %s" % t.assignedTask.instanceId
            )
        _assert_task_metadata(t, _V1_METADATA, " for affected instances")


def test__update_with_pinned_instances__start_stopped_instances_all(client):
    """
    test pinned instance deployment with stop / start all instances:
    1. start a regular update (version 1) on all instances
    2. stop all instances
    3. start the same update (version 1) on all instances (stopped
       instances included), expect all instances to be updated and
       start running
    4. start regular update (version 1) again on all instances, expect
       no change on all instances
    """
    all_instances = set(range(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # stop all instances
    stop_instances = set(range(10))
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing all instances for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == 0

    # start a update without updateOnlyTheseInstances parameter
    # expect all instances to be started
    update_instances = set(range(10))
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml"),
        "start second job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(
        client, res.key, update_instances & stop_instances
    )

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        assert _run_id(t) == "2"
        _assert_task_metadata(t, _V1_METADATA, " for affected instances")

    # start the regular update again same as the first one
    # expect no change for all instances
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start third job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))
    assert len(res.detailsList) == 1
    # identical config: the update must not touch any instance
    assert len(res.detailsList[0].instanceEvents) == 0

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        if t.assignedTask.instanceId in stop_instances:
            assert _run_id(t) == "2"
        elif t.assignedTask.instanceId in (all_instances - stop_instances):
            assert _run_id(t) == "1"
        else:
            assert False, (
                "unexpected instance id %s" % t.assignedTask.instanceId
            )
        _assert_task_metadata(t, _V1_METADATA, " for affected instances")


def test__update_with_pinned_instances__deploy_stopped_instances(client):
    """
    test pinned instance deployment with stop / deploy instances:
    1. start a regular update (version 1) on all instances
    2. stop subset of instances
    3. start a new update (version 2) targeting subset of instances
       (stopped instances included), expect stopped instances to be
       brought up with new version and other targeted instances to
       be updated
    4. start regular update (version 1) again on all instances, expect
       only instances affected by previous step to be updated
    """
    all_instances = set(range(10))

    # start a regular update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job.yaml"),
        "start job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        assert _run_id(t) == "1"
        _assert_task_metadata(t, _V1_METADATA)

    # stop subset of instances
    stop_instances = {2, 8}
    client.kill_tasks(
        job_key,
        stop_instances,
        "killing instance 2, 8 for job test/dc/labrat_large_job",
    )
    wait_for_killed(client, job_key, stop_instances)
    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances - stop_instances)
    for t in tasks:
        assert t.assignedTask.instanceId in (all_instances - stop_instances)

    # start a update with updateOnlyTheseInstances parameter
    # expect stopped instances to be started
    update_instances = {2, 3, 5, 8}
    pinned_req = _pinned_update_request(
        "test_dc_labrat_large_job_diff_labels.yaml", update_instances
    )
    res = client.start_job_update(
        pinned_req,
        "start second job update test/dc/labrat_large_job with pinned instances and label diff",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        if t.assignedTask.instanceId in update_instances:
            assert _run_id(t) == "2"
            _assert_task_metadata(t, _V2_METADATA, " for affected instances")
        elif t.assignedTask.instanceId in (all_instances - update_instances):
            assert _run_id(t) == "1"
            _assert_task_metadata(t, _V1_METADATA, " for affected instances")
        else:
            assert False, (
                "unexpected instance id %s" % t.assignedTask.instanceId
            )

    # start the regular update again same as the first one
    # expect changes only for instances updated by previous update
    res = client.start_job_update(
        get_job_update_request("test_dc_labrat_large_job_diff_executor.yaml"),
        "start third job update test/dc/labrat_large_job",
    )
    wait_for_rolled_forward(client, res.key)
    job_key = res.key.job
    _assert_instance_events_within(client, res.key, update_instances)

    tasks = _get_running_tasks(client, job_key)
    assert len(tasks) == len(all_instances)
    for t in tasks:
        if t.assignedTask.instanceId in update_instances:
            assert _run_id(t) == "3"
        elif t.assignedTask.instanceId in (all_instances - update_instances):
            assert _run_id(t) == "1"
        else:
            assert False, (
                "unexpected instance id %s" % t.assignedTask.instanceId
            )
        _assert_task_metadata(t, _V1_METADATA, " for affected instances")
start regular update (version 1) again on another set of instances732 (some of previously stopped instances included, some of instances733 updated in previous step included), expect only stopped and734 instances affected previous step to be either brought up or updated735 """736 all_instances = set([i for i in xrange(10)])737 # start a regular update738 res = client.start_job_update(739 get_job_update_request("test_dc_labrat_large_job.yaml"),740 "start job update test/dc/labrat_large_job",741 )742 wait_for_rolled_forward(client, res.key)743 job_key = res.key.job744 res = client.get_tasks_without_configs(745 api.TaskQuery(jobKeys={job_key}, statuses={api.ScheduleStatus.RUNNING})746 )747 assert len(res.tasks) == len(all_instances)748 for t in res.tasks:749 _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)750 assert run_id == "1"751 assert len(t.assignedTask.task.metadata) == 2752 for m in t.assignedTask.task.metadata:753 if m.key == "test_key_1":754 assert m.value == "test_value_1"755 elif m.key == "test_key_2":756 assert m.value == "test_value_2"757 else:758 assert False, "unexpected metadata %s" % m759 # stop subset of instances760 stop_instances = set([2, 8])761 client.kill_tasks(762 job_key,763 stop_instances,764 "killing instance 2, 8 for job test/dc/labrat_large_job",765 )766 wait_for_killed(client, job_key, stop_instances)767 res = client.get_tasks_without_configs(768 api.TaskQuery(jobKeys={job_key}, statuses={api.ScheduleStatus.RUNNING})769 )770 assert len(res.tasks) == len(all_instances - stop_instances)771 for t in res.tasks:772 assert t.assignedTask.instanceId in (all_instances - stop_instances)773 # start a update with updateOnlyTheseInstances parameter774 # expected only instances which targeted by updateOnlyTheseInstances775 # to be updated, within which stopped ones are started.776 update_instances = set([3, 5, 8])777 pinned_req = get_job_update_request(778 "test_dc_labrat_large_job_diff_labels.yaml"779 )780 
pinned_req.settings.updateOnlyTheseInstances = set(781 [api.Range(first=i, last=i) for i in update_instances]782 )783 res = client.start_job_update(784 pinned_req,785 "start second job update test/dc/labrat_large_job with pinned instances and label diff",786 )787 wait_for_rolled_forward(client, res.key)788 job_key = res.key.job789 res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))790 assert len(res.detailsList) == 1791 assert len(res.detailsList[0].instanceEvents) > 0792 for ie in res.detailsList[0].instanceEvents:793 assert ie.instanceId in update_instances794 res = client.get_tasks_without_configs(795 api.TaskQuery(jobKeys={job_key}, statuses={api.ScheduleStatus.RUNNING})796 )797 assert len(res.tasks) == len(798 (all_instances - stop_instances) | update_instances799 )800 for t in res.tasks:801 _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)802 assert len(t.assignedTask.task.metadata) == 2803 if t.assignedTask.instanceId in update_instances:804 assert run_id == "2"805 for m in t.assignedTask.task.metadata:806 if m.key == "test_key_11":807 assert m.value == "test_value_11"808 elif m.key == "test_key_22":809 assert m.value == "test_value_22"810 else:811 assert False, (812 "unexpected metadata %s for affected instances" % m813 )814 elif t.assignedTask.instanceId in (815 all_instances - stop_instances - update_instances816 ):817 assert run_id == "1"818 for m in t.assignedTask.task.metadata:819 if m.key == "test_key_1":820 assert m.value == "test_value_1"821 elif m.key == "test_key_2":822 assert m.value == "test_value_2"823 else:824 assert False, (825 "unexpected metadata %s for affected instances" % m826 )827 else:828 assert False, (829 "unexpected instance id %s: should be stopped"830 % t.assignedTask.instanceId831 )832 # start the regular update again same as the first one, targeting833 # subset of instances.834 # expect instance start / updated iff the instance has different config835 # or instance is stopped.836 update_2_instances = 
set([2, 3, 8, 9])837 pinned_req_2 = get_job_update_request(838 "test_dc_labrat_large_job_diff_executor.yaml"839 )840 pinned_req_2.settings.updateOnlyTheseInstances = set(841 [api.Range(first=i, last=i) for i in update_2_instances]842 )843 res = client.start_job_update(844 pinned_req_2, "start third job update test/dc/labrat_large_job"845 )846 wait_for_rolled_forward(client, res.key)847 job_key = res.key.job848 res = client.get_job_update_details(None, api.JobUpdateQuery(key=res.key))849 assert len(res.detailsList) == 1850 assert len(res.detailsList[0].instanceEvents) > 0851 for ie in res.detailsList[0].instanceEvents:852 # exclude instances that are previously running and still on853 # the first update854 assert ie.instanceId in (855 update_2_instances856 - (all_instances - update_instances - stop_instances)857 )858 # Expected instances for each corresponding state:859 #860 # v1s - instances on original job config (v1) and stopped861 # v1r1 - instances on original job config (v1) and running with run id 1862 # v1r2 - instances on original job config (v1) and running with run id 2863 # v1r3 - instances on original job config (v1) and running with run id 3864 # v2r2 - instances on updated job config (v2) and running with run id 2865 #866 # How did we calculate the instance ids?867 #868 # Let T1, T2, T3, T4 be each of the four operations, which are869 # T1 - start original update (v1 job config) for all instances (let it be A)870 # T2 - stop subset of instances (let it be S)871 # T3 - start new update (v2 job config) on subset of instances (let it be U1)872 # T4 - start origin update again (v1 job config) on subset of instances (let it be U2)873 #874 # At T1:875 # v1r1 = A876 #877 # At T2:878 # v1s = S879 # v1r1' = v1r1 - S = A - S880 #881 # At T3:882 # v1s' = v1s - U1 = S - U1883 # v2r1 = (empty set)884 # v2r2 = U1885 # v1r1'' = A - v2r2 - v1s' = A - U1 - (S - U1)886 #887 # At T4:888 # v1s'' = v1s' - U2 = S - U1 - U2889 # v1r2 = U2 & v1s' = U2 & (S - U1)890 # v1r3 = 
U1 & U2891 # v2r2' = v2r2 - U2 = U1 - U2892 # v1r1''' = A - v1s'' - v1r2 - v1r3 - v2r2'893 v1s = stop_instances - update_instances - update_2_instances894 v1r2 = update_2_instances & (stop_instances - update_instances)895 v1r3 = update_instances & update_2_instances896 v2r2 = update_instances - update_2_instances897 v1r1 = all_instances - v1s - v1r2 - v1r3 - v2r2898 assert not v1s, "should not be any instances remain as stopped"899 assert v1r1, "expect instances to be in version 1 run id 1"900 assert v1r2, "expect instances to be in version 1 run id 2"901 assert v1r3, "expect instances to be in version 1 run id 3"902 assert v2r2, "expect instances to be in version 2 run id 2"903 res = client.get_tasks_without_configs(904 api.TaskQuery(jobKeys={job_key}, statuses={api.ScheduleStatus.RUNNING})905 )906 assert len(res.tasks) == len(all_instances)907 for t in res.tasks:908 _, _, run_id = t.assignedTask.taskId.rsplit("-", 2)909 assert len(t.assignedTask.task.metadata) == 2910 if t.assignedTask.instanceId in v1r1:911 # version 1, run 1912 assert run_id == "1"913 for m in t.assignedTask.task.metadata:914 if m.key == "test_key_1":915 assert m.value == "test_value_1"916 elif m.key == "test_key_2":917 assert m.value == "test_value_2"918 else:919 assert False, (920 "unexpected metadata %s for affected instances" % m921 )922 elif t.assignedTask.instanceId in v1r2:923 # version 1, run 2924 assert run_id == "2"925 for m in t.assignedTask.task.metadata:926 if m.key == "test_key_1":927 assert m.value == "test_value_1"928 elif m.key == "test_key_2":929 assert m.value == "test_value_2"930 else:931 assert False, (932 "unexpected metadata %s for affected instances" % m933 )934 elif t.assignedTask.instanceId in v1r3:935 # version 1, run 3936 assert run_id == "3"937 for m in t.assignedTask.task.metadata:938 if m.key == "test_key_1":939 assert m.value == "test_value_1"940 elif m.key == "test_key_2":941 assert m.value == "test_value_2"942 else:943 assert False, (944 "unexpected metadata %s 
for affected instances" % m945 )946 elif t.assignedTask.instanceId in v2r2:947 # version 2, run 2948 assert run_id == "2"949 for m in t.assignedTask.task.metadata:950 if m.key == "test_key_11":951 assert m.value == "test_value_11"952 elif m.key == "test_key_22":953 assert m.value == "test_value_22"954 else:955 assert False, (956 "unexpected metadata %s for affected instances" % m957 )958 else:959 assert False, (960 "unexpected instance id %s" % t.assignedTask.instanceId...

Full Screen

Full Screen

start-stop-ec2-instances.py

Source:start-stop-ec2-instances.py Github

copy

Full Screen

def stop_instances():
    """Force-stop every EC2 instance listed in the module-level ``instances``.

    Relies on the module-level boto3 EC2 ``client``. The previous version
    issued one StopInstances API call per instance inside a loop; the EC2
    API accepts a list of instance ids, so a single batched call does the
    same work with one round trip.
    """
    ids = list(instances)
    # EC2 StopInstances requires at least one id; presumably an empty call
    # would fail with MissingParameter — skip it entirely. TODO confirm.
    if not ids:
        return
    # Force=True performs a hard stop, skipping the graceful OS shutdown,
    # matching the original snippet's behavior.
    client.stop_instances(InstanceIds=ids, Force=True)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation testing minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful