How to use the check_disks method in tempest

Best Python code snippet using tempest_python

test_disk_image_format.py

Source: test_disk_image_format.py (GitHub)



...
    Storage/3_2_Storage_Disk_Image_Format
    """
    default_disks = {}
    __test__ = False

    def check_disks(self, disks_dict={}):
        """
        Make sure the vm's disks have the expected values. If the parameter
        disks_dict is passed in, the default dictionary is updated. Also make
        sure there's at least one disk in the vm
        :param disks_dict: dictionary str/bool with disk identifier and sparse
        value
        :type disks_dict: dict
        """
        check_disks = self.default_disks.copy()
        check_disks.update(disks_dict)
        for key, sparse in check_disks.iteritems():
            testflow.step("Checking disks format for %s", key)
            vm_disks = config.retrieve_disk_obj(key)
            # Make sure there's at least one disk
            assert vm_disks
            for disk in vm_disks:
                assert disk.get_sparse() == sparse, (
                    "Wrong sparse value for disk %s (disk id: %s) expected %s"
                    % (disk.get_alias(), disk.get_id(), str(sparse))
                )


@pytest.mark.usefixtures(
    create_test_vms.__name__,
)
class BaseTestDiskImageVms(BaseTestDiskImage):
    """
    Base Test Case with two vms created, with thin and pre-allocated disks
    """
    polarion_test_id = None

    def execute_concurrent_vms(self, fn):
        """
        Concurrent execute function for self.vm_names
        :param fn: function to submit to ThreadPoolExecutor. The function must
        accept only one parameter, the name of the vm
        :type fn: function
        """
        executions = list()
        with ThreadPoolExecutor(max_workers=2) as executor:
            for vm in self.vm_names:
                executions.append(executor.submit(fn, **{"vm": vm}))
        for execution in executions:
            if not execution.result():
                if execution.exception():
                    raise execution.exception()
                else:
                    raise Exception("Error executing %s" % execution)

    def add_snapshots(self):
        """
        Create a snapshot for each vm in parallel
        """
        testflow.step("Adding snapshots for %s", ", ".join(self.vm_names))

        def addsnapshot(vm):
            return ll_vms.addSnapshot(True, vm, self.snapshot_desc)

        self.execute_concurrent_vms(addsnapshot)
        ll_jobs.wait_for_jobs([config.JOB_CREATE_SNAPSHOT])

    def export_vms(self, discard_snapshots=False):
        """
        Export vms in parallel
        """
        def exportVm(vm):
            status = ll_vms.exportVm(
                True, vm, config.EXPORT_DOMAIN_NAME,
                discard_snapshots=discard_snapshots
            )
            return status

        testflow.step("Export vms %s", ", ".join(self.vm_names))
        self.execute_concurrent_vms(exportVm)

    def import_vms(self, collapse=False):
        """
        Import vms in parallel
        """
        testflow.step("Import vms %s", ", ".join(self.vm_names))

        def importVm(vm):
            return ll_vms.importVm(
                True, vm, config.EXPORT_DOMAIN_NAME, self.storage_domain,
                config.CLUSTER_NAME, collapse=collapse
            )

        self.execute_concurrent_vms(importVm)

    def check_snapshots_collapsed(self):
        """
        Ensure that the snapshots are removed after the import process
        """
        vm_thin_snapshots = [
            snapshot.get_description() for snapshot in
            ll_vms.get_vm_snapshots(self.vm_thin)
        ]
        vm_prealloc_snapshots = [
            snapshot.get_description() for snapshot in
            ll_vms.get_vm_snapshots(self.vm_prealloc)
        ]
        assert self.snapshot_desc not in vm_thin_snapshots
        assert self.snapshot_desc not in vm_prealloc_snapshots


class TestCase11604(BaseTestDiskImageVms):
    """
    Polarion case 11604
    """
    # Bugzilla history:
    # 1251956: Live storage migration is broken
    # 1259785: Error 'Unable to find org.ovirt.engine.core.common.job.Step with
    # id' after live migrate a Virtio RAW disk, job stays in status STARTED
    __test__ = True
    polarion_test_id = '11604'

    @polarion("RHEVM3-11604")
    @tier2
    def test_format_and_snapshots(self):
        """
        Create a snapshot
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.check_disks()
        self.add_snapshots()
        self.check_disks({self.vm_prealloc: True})


# Bugzilla 1403183 is a duplicate of 1405822 that was openned for this
# test with more info about the cause of failure.
@bz({'1403183': {}})
class TestCase11621(BaseTestDiskImageVms):
    """
    Polarion case 11621
    """
    # Bugzilla history:
    # 1251956: Live storage migration is broken
    # 1259785: Error 'Unable to find org.ovirt.engine.core.common.job.Step with
    # id' after live migrate a Virtio RAW disk, job stays in status STARTED
    __test__ = True
    polarion_test_id = '11621'

    @rhevm_helpers.wait_for_jobs_deco([config.JOB_MOVE_COPY_DISK])
    @polarion("RHEVM3-11621")
    @tier2
    def test_move_disk_offline(self):
        """
        Move the disk
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        assert ll_disks.move_disk(
            disk_id=self.disk_thin, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        assert ll_disks.move_disk(
            disk_id=self.disk_prealloc, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        ll_jobs.wait_for_jobs([config.JOB_MOVE_COPY_DISK])
        self.check_disks()


class TestCase11620(BaseTestDiskImageVms):
    """
    Polarion case 11620
    """
    # Bugzilla history:
    # 1251956: Live storage migration is broken
    # 1259785: Error 'Unable to find org.ovirt.engine.core.common.job.Step with
    # id' after live migrate a Virtio RAW disk, job stays in status STARTED
    __test__ = True
    polarion_test_id = '11620'

    @polarion("RHEVM3-11620")
    @tier3
    def test_add_snapshot_and_move_disk(self):
        """
        Create a snapshot and move the disk
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.add_snapshots()
        self.check_disks({self.vm_prealloc: True})
        assert ll_disks.move_disk(
            disk_id=self.disk_thin, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        assert ll_disks.move_disk(
            disk_id=self.disk_prealloc, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        ll_jobs.wait_for_jobs([config.JOB_MOVE_COPY_DISK])
        self.check_disks({self.vm_prealloc: True})


class TestCase11619(BaseTestDiskImageVms):
    """
    Polarion case 11619
    """
    # Bugzilla history:
    # 1251956: Live storage migration is broken
    # 1259785: Error 'Unable to find org.ovirt.engine.core.common.job.Step with
    # id' after live migrate a Virtio RAW disk, job stays in status STARTED
    __test__ = True
    polarion_test_id = '11619'

    @polarion("RHEVM3-11619")
    @tier2
    def test_live_move_disk(self):
        """
        Start a live disk migration
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        ll_vms.start_vms(
            [self.vm_prealloc, self.vm_thin], max_workers=2,
            wait_for_status=config.VM_UP, wait_for_ip=False
        )
        testflow.step(
            "Moving disk %s to storage domain %s",
            self.disk_thin, self.storage_domain_1
        )
        assert ll_disks.move_disk(
            disk_id=self.disk_thin, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        testflow.step(
            "Moving disk %s to storage domain %s",
            self.disk_prealloc, self.storage_domain_1
        )
        assert ll_disks.move_disk(
            disk_id=self.disk_prealloc, target_domain=self.storage_domain_1,
            timeout=MOVE_DISK_TIMEOUT
        )
        ll_vms.wait_for_disks_status(
            [self.disk_thin, self.disk_prealloc], key='id',
            timeout=MOVE_DISK_TIMEOUT
        )
        ll_jobs.wait_for_jobs([config.JOB_MOVE_COPY_DISK])
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_SNAPSHOT])
        ll_vms.wait_for_vm_snapshots(self.vm_prealloc, config.SNAPSHOT_OK)
        ll_vms.wait_for_vm_snapshots(self.vm_thin, config.SNAPSHOT_OK)
        self.check_disks({self.vm_prealloc: False})


@pytest.mark.usefixtures(
    clean_export_domain.__name__,
)
class ExportVms(BaseTestDiskImageVms):
    """
    Common class for export related cases
    """
    pass


@bz({'1409238': {}})
class TestCase11618(ExportVms):
    """
    Polarion case 11618
    """
    __test__ = False # Because of bug 1409238
    polarion_test_id = '11618'

    @polarion("RHEVM3-11618")
    @tier2
    def test_export_vm(self):
        """
        Export a vm
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        self.export_vms()
        config.retrieve_disk_obj = lambda w: ll_vms.getVmDisks(
            w, storage_domain=config.EXPORT_DOMAIN_NAME
        )
        self.check_disks()


@bz({'1409238': {}})
class TestCase11617(ExportVms):
    """
    Polarion case 11617
    """
    __test__ = False # Because of bug 1409238
    polarion_test_id = '11617'

    @polarion("RHEVM3-11617")
    @tier2
    def test_add_snapshot_and_export_vm(self):
        """
        Create a snapshot and export the vm
        * Thin provisioned disk in the export domain should remain the same
        * Preallocated disk in the export domain should change to thin
        provision
        """
        self.add_snapshots()
        self.export_vms()
        config.retrieve_disk_obj = lambda w: ll_vms.getVmDisks(
            w, storage_domain=config.EXPORT_DOMAIN_NAME
        )
        self.check_disks({self.vm_prealloc: True})


@bz({'1409238': {}})
class TestCase11616(ExportVms):
    """
    Polarion case 11616
    """
    __test__ = True
    polarion_test_id = '11616'

    @polarion("RHEVM3-11616")
    @tier2
    def test_add_snapshot_export_vm_with_discard_snapshots(self):
        """
        Create a snapshot and export the vm choosing to discard the existing
        snapshots.
        * Thin provisioned disk in the export domain should remain the same
        * Preallocated disk in the export domain should remain the same
        """
        self.add_snapshots()
        self.export_vms(discard_snapshots=True)
        config.retrieve_disk_obj = lambda w: ll_vms.getVmDisks(
            w, storage_domain=config.EXPORT_DOMAIN_NAME
        )
        self.check_disks()


class TestCase11615(ExportVms):
    """
    Polarion case 11615
    """
    __test__ = True
    polarion_test_id = '11615'

    @polarion("RHEVM3-11615")
    @tier2
    def test_import_vm(self):
        """
        Export a vm and import it back
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        self.export_vms()
        assert ll_vms.removeVms(True, [self.vm_thin, self.vm_prealloc])
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])
        self.import_vms()
        self.check_disks()


class TestCase11614(ExportVms):
    """
    Polarion case 11614
    """
    __test__ = True
    polarion_test_id = '11614'

    @polarion("RHEVM3-11614")
    @tier3
    def test_export_vm_after_snapshot_and_import(self):
        """
        Create snapshot on vm, export the vm and import it back
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.add_snapshots()
        self.export_vms()
        assert ll_vms.removeVms(True, [self.vm_thin, self.vm_prealloc])
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])
        self.import_vms()
        self.check_disks({self.vm_prealloc: True})


class TestCase11613(ExportVms):
    """
    Polarion case 11613
    """
    __test__ = True
    polarion_test_id = '11613'

    @polarion("RHEVM3-11613")
    @tier2
    def test_export_vm_with_collapse(self):
        """
        Polarion case id: 11613
        Create a snapshot to a vm, export the vm and import choosing to
        collapse the existing snapshots
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.add_snapshots()
        self.export_vms()
        assert ll_vms.removeVms(True, [self.vm_thin, self.vm_prealloc])
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])
        self.import_vms(collapse=True)
        self.check_snapshots_collapsed()
        self.check_disks({self.vm_prealloc: True})


@pytest.mark.usefixtures(
    create_vm.__name__,
    create_template.__name__,
    clean_export_domain.__name__,
    remove_vm_setup.__name__,
    remove_vms.__name__,
)
class TestCasesImportVmLinked(BaseTestDiskImage):
    """
    Collection for test cases with one vm imported
    """
    config.retrieve_disk_obj = lambda x: ll_vms.getVmDisks(x)


class TestCase11612(TestCasesImportVmLinked):
    """
    Polarion case 11612
    """
    __test__ = True
    polarion_test_id = '11612'

    @polarion("RHEVM3-11612")
    @tier3
    def test_import_link_to_template(self):
        """
        Create a vm from a thin provisioned template, export the vm and
        re-import it back
        * Thin provisioned disk should remain the same
        """
        assert ll_vms.cloneVmFromTemplate(
            True, self.vm_name, self.template_name, config.CLUSTER_NAME,
            clone=False, vol_sparse=True, vol_format=config.COW_DISK
        )
        assert ll_vms.exportVm(True, self.vm_name, config.EXPORT_DOMAIN_NAME)
        assert ll_vms.removeVm(True, self.vm_name)
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])
        assert ll_vms.importVm(
            True, self.vm_name, config.EXPORT_DOMAIN_NAME, self.storage_domain,
            config.CLUSTER_NAME
        )
        self.vm_names.append(self.vm_name)
        self.check_disks()


class TestCase11611(TestCasesImportVmLinked):
    """
    Polarion case 11611
    """
    __test__ = True
    polarion_test_id = '11611'

    @polarion("RHEVM3-11611")
    @tier3
    def test_import_link_to_template_collapse(self):
        """
        Create a vm from a thin provisioned template, export the vm and the
        template, remove both of them and import the vm back
        * Thin provisioned disk should remain the same
        """
        assert ll_vms.cloneVmFromTemplate(
            True, self.vm_name, self.template_name, config.CLUSTER_NAME,
            clone=False, vol_sparse=True, vol_format=config.COW_DISK
        )
        assert ll_templates.exportTemplate(
            True, self.template_name, config.EXPORT_DOMAIN_NAME, wait=True
        )
        self.remove_exported_template = True
        assert ll_vms.exportVm(True, self.vm_name, config.EXPORT_DOMAIN_NAME)
        assert ll_vms.removeVm(True, self.vm_name)
        ll_jobs.wait_for_jobs([ENUMS['job_remove_vm']])
        assert ll_templates.remove_template(True, self.template_name)
        assert ll_vms.importVm(
            True, self.vm_name, config.EXPORT_DOMAIN_NAME, self.storage_domain,
            config.CLUSTER_NAME, collapse=True
        )
        self.check_disks()


@pytest.mark.usefixtures(
    remove_vms.__name__,
    clean_export_domain.__name__,
)
class TestCasesImportVmWithNewName(BaseTestDiskImageVms):
    """
    Check disk images' format after importing the vm without removing the
    original vm used in the export process
    """
    def import_vm_with_new_name(self):
        """
        Export the thin provisioned and preallocated disk vms, then import them
        with a new name
        """
        self.new_vm_thin = "new_%s" % self.vm_thin
        self.new_vm_prealloc = "new_%s" % self.vm_prealloc
        self.export_vms()
        assert ll_vms.importVm(
            True, self.vm_thin, config.EXPORT_DOMAIN_NAME, self.storage_domain,
            config.CLUSTER_NAME, name=self.new_vm_thin
        )
        self.vm_names.append(self.new_vm_thin)
        assert ll_vms.importVm(
            True, self.vm_prealloc, config.EXPORT_DOMAIN_NAME,
            self.storage_domain, config.CLUSTER_NAME, name=self.new_vm_prealloc
        )
        self.vm_names.append(self.new_vm_prealloc)


class TestCase11610(TestCasesImportVmWithNewName):
    """
    Polarion case 11610
    """
    __test__ = True
    polarion_test_id = '11610'

    @polarion("RHEVM3-11610")
    @tier2
    def test_import_vm_without_removing_old_vm(self):
        """
        Import a vm without removing the original vm used in the export
        process
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.import_vm_with_new_name()


class TestCase11609(TestCasesImportVmWithNewName):
    """
    Polarion case 11609
    """
    __test__ = True
    polarion_test_id = '11609'

    @polarion("RHEVM3-11609")
    @tier3
    def test_import_vm_without_removing_old_vm_with_snapshot(self):
        """
        Create a snapshot to a vm, export the vm and import without removing
        the original vm used in the export process
        * Thin provisioned disk should remain the same
        * Preallocated disk should change to thin provisioned
        """
        self.add_snapshots()
        self.import_vm_with_new_name()
        self.check_disks({self.vm_prealloc: True})


@pytest.mark.usefixtures(
    remove_test_templates.__name__,
)
class TestCasesCreateTemplate(BaseTestDiskImageVms):
    """
    Verify the disk images' format of a template
    """
    template_thin_name = "%s_template_thin"
    template_preallocated_name = "%s_template_preallocated"
    # Bugzilla history:
    # 1257240: Template's disk format is wrong

    def create_template_from_vm(self):
        """
        Create one template from a vm with a thin provisioned disk and one from
        a vm with a preallocated disk. Check templates' disks image format
        """
        assert ll_templates.createTemplate(
            True, vm=self.vm_thin, name=self.template_thin,
            cluster=config.CLUSTER_NAME
        )
        assert ll_templates.createTemplate(
            True, vm=self.vm_prealloc, name=self.template_preallocated,
            cluster=config.CLUSTER_NAME
        )
        config.retrieve_disk_obj = ll_templates.getTemplateDisks
        self.default_disks = {
            self.template_thin: True,
            self.template_preallocated: False,
        }
        self.check_disks()


class TestCase11608(TestCasesCreateTemplate):
    """
    Polarion case 11608
    """
    __test__ = True
    polarion_test_id = '11608'

    @polarion("RHEVM3-11608")
    @tier2
    def test_create_template_from_vm(self):
        """
        Create a template from a vm
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        self.create_template_from_vm()


class TestCase11607(TestCasesCreateTemplate):
    """
    Polarion case 11607
    """
    __test__ = True
    polarion_test_id = '11607'

    @polarion("RHEVM3-11607")
    @tier3
    def test_create_template_from_vm_with_snapshots(self):
        """
        Create a snapshot to the vm and create a template
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        self.add_snapshots()
        self.create_template_from_vm()


@pytest.mark.usefixtures(
    remove_template.__name__,
    create_vm.__name__,
    add_disk.__name__,
    attach_disk.__name__,
    clean_export_domain.__name__,
    initialize_template_name.__name__,
)
class TestCase11606(BaseTestDiskImage):
    """
    Test vm with both disk formats
    """
    add_disk_params = {
        'bootable': False,
        'format': config.RAW_DISK,
        'sparse': False
    }
    get_thin_disk = lambda self, x: [
        d.get_alias() for d in ll_vms.getVmDisks(x) if d.get_sparse()
    ][0]

    def check_disks(self):
        """
        Verify the vm and template disks' format
        """
        self.thin_disk_alias = self.get_thin_disk(self.vm_name)
        for function, object_name in [
            (ll_disks.getTemplateDisk, self.template_name),
            (ll_disks.getVmDisk, self.vm_name)
        ]:
            thin_disk = function(object_name, self.thin_disk_alias)
            preallocated_disk = function(
                object_name, self.disk_name,
            )
            assert thin_disk.get_sparse(), (
                "%s disk %s should be thin provisioned" %
                (object_name, thin_disk.get_alias())
            )
            assert not preallocated_disk.get_sparse(), (
                "%s disk %s should be preallocated" %
                (object_name, preallocated_disk.get_alias())
            )

    def action_test(self, collapse=False):
        """
        Export the vm, import it and create a template
        """
        assert ll_vms.exportVm(True, self.vm_name, config.EXPORT_DOMAIN_NAME)
        assert ll_vms.removeVm(True, self.vm_name)
        ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])
        assert ll_vms.importVm(
            True, self.vm_name, config.EXPORT_DOMAIN_NAME, self.storage_domain,
            config.CLUSTER_NAME, collapse=collapse
        )
        assert ll_templates.createTemplate(
            True, vm=self.vm_name, name=self.template_name
        )


class TestCase11606A(TestCase11606):
    """
    No snapshot on vm
    """
    __test__ = True
    polarion_test_id = '11606'

    @polarion("RHEVM3-11606")
    @tier3
    def test_different_format_same_vm(self):
        """
        Polarion case id: 11606 - no snapshot
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
        """
        self.action_test()
        self.check_disks()


class TestCase11606B(TestCase11606):
    """
    Snapshot on vm
    """
    __test__ = True
    polarion_test_id = '11606'
    deep_copy = True

    @polarion("RHEVM3-11606")
    @tier3
    def test_different_format_same_vm_with_snapshot(self):
        """
        Polarion case id: 11606 - with snapshot
        * Thin provisioned disk should remain the same
        * Preallocated disk should remain the same
...
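In the suite above, check_disks() drives every assertion about image format: it copies the class-level default_disks expectations, merges in any per-test overrides (for example, a preallocated disk that should become thin provisioned after a snapshot), and asserts the sparse flag of every disk returned by config.retrieve_disk_obj. The snippet below is a minimal, self-contained sketch of that same copy-update-assert flow. FakeDisk, DEFAULT_DISKS and ACTUAL_DISKS are hypothetical stand-ins for the ll_vms/config helpers, and it uses Python 3's items() where the original (Python 2 era) code uses iteritems().

# Minimal, self-contained sketch of the check_disks() pattern shown above.
# FakeDisk and the two dictionaries are illustrative stand-ins, not the real
# ll_vms/config objects from the test suite.


class FakeDisk(object):
    def __init__(self, alias, sparse):
        self._alias = alias
        self._sparse = sparse

    def get_alias(self):
        return self._alias

    def get_sparse(self):
        return self._sparse


# Expected sparse value per VM (True = thin provisioned, False = preallocated)
DEFAULT_DISKS = {"vm_thin": True, "vm_prealloc": False}

# Pretend inventory returned by the environment
ACTUAL_DISKS = {
    "vm_thin": [FakeDisk("thin_disk", True)],
    "vm_prealloc": [FakeDisk("prealloc_disk", True)],  # changed after a snapshot
}


def check_disks(disks_dict=None):
    """Merge overrides into the defaults and assert each disk's format."""
    expected = DEFAULT_DISKS.copy()
    expected.update(disks_dict or {})
    for vm, sparse in expected.items():
        vm_disks = ACTUAL_DISKS[vm]
        # Make sure there's at least one disk
        assert vm_disks, "VM %s has no disks" % vm
        for disk in vm_disks:
            assert disk.get_sparse() == sparse, (
                "Wrong sparse value for disk %s, expected %s"
                % (disk.get_alias(), sparse)
            )


# After a snapshot the preallocated disk is expected to become thin
# provisioned, so the default expectation for vm_prealloc is overridden:
check_disks({"vm_prealloc": True})
print("disk formats match expectations")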


byte-scan.py

Source: byte-scan.py (GitHub)



#!/usr/bin/python3
import os
import sys
import subprocess
import hashlib
import json
import mmap
# script assumes running as root
likely_header = bytearray.fromhex("00000000010000000000000062310500")
# list of system drives to not attempt to mount
ignore_drives = ("7a907b0c-4f8a-4350-b2b4-c804abca9622", "ed5b6085-40e0-463e-b2fa-438f6cb9ddcb", "6d406c55-5de8-4401-b2ee-92bd1046520c", "80b7f420-7699-476c-9db1-c0108bb661aa")


def main():
    meta_file = open("./wallets/meta.json", 'r')
    contents = meta_file.read()
    meta_file.close()
    wallet_meta = json.loads(contents)
    print('loaded existing ./wallets/meta.json')
    matches = {}
    if len(sys.argv) >= 2:
        if len(sys.argv) >= 3:
            print("specify no args to scan all disks, or 1 arg to specify a specific file")
            return

        disk = sys.argv[1]
        matches[disk] = []
        diskMatches = matches[disk]
        checkFile(disk, diskMatches)
    else:
        check_disks = [f for f in os.listdir('/dev/disk/by-uuid') if f not in ignore_drives]
        print("drives to scan: ", check_disks)
        for disk in check_disks:
            matches[disk] = []
            diskMatches = matches[disk]
            diskname = '/dev/disk/by-uuid/{}'.format(disk)
            # diskpath = os.path.realpath(diskname)
            checkFile(diskname, diskMatches)
    # compare results of disk scan with results of meta.json
    print()
    print('results')
    print()
    for disk in matches:
        diskMatches = matches[disk]
        print('found matches: {}'.format(len(diskMatches)))
        print('matching indices: {}', diskMatches)
        print()
        metaMatches = []
        for meta in wallet_meta.values():
            if disk == meta["drive"] and meta["likelyWallet"] == True:
                metaMatches.append(meta["fulldir"])
        print('meta matches: {}'.format(len(metaMatches)))

        if len(metaMatches) == len(diskMatches):
            print('all likely wallets found')
        else:
            # TODO print differences
            print('wallet count mismatch')


# checkFile scans a block device or file for bitcoin signatures
def checkFile(diskname, diskMatches):
    print('scanning: ' + diskname)
    with open(diskname, 'r+b') as file:
        print("fileno {}".format(file.fileno()))
        file.seek(0, 2)  # move to end of file
        size = file.tell()  # get size
        file.seek(0, 0)  # move back to start (is this needed?)
        with mmap.mmap(file.fileno(), length=size, access=mmap.ACCESS_READ) as s:
            idx = 0
            while True:
                index = s.find(likely_header, idx)
                if index == -1:
                    break
                # print('found match: {}'.format(index))
                diskMatches.append(index)
                idx = index + 1
...
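In this script, check_disks is simply a local list of drive UUIDs taken from /dev/disk/by-uuid (minus the system drives in ignore_drives); the interesting part is checkFile(), which memory-maps each device and repeatedly calls find() with an advancing offset to record every occurrence of the wallet header. Below is a minimal, self-contained sketch of that scan loop, assuming you scan a temporary file instead of a block device so it runs without root; it reuses the script's hex constant purely as an example needle.

#!/usr/bin/python3
# Sketch of the mmap signature-scan loop from byte-scan.py, run against a
# temporary file instead of a block device so no root access is required.
import mmap
import tempfile

header = bytes.fromhex("00000000010000000000000062310500")


def find_all(path, needle):
    """Return every byte offset at which needle occurs in the file at path."""
    offsets = []
    with open(path, "rb") as f:
        # length=0 maps the whole (non-empty) file read-only
        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as m:
            idx = 0
            while True:
                index = m.find(needle, idx)
                if index == -1:
                    break
                offsets.append(index)
                idx = index + 1
    return offsets


# Build a small test file containing the header twice
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"junk" + header + b"more junk" + header)
    path = tmp.name

print(find_all(path, header))  # prints [4, 29]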


mount-drives.py

Source: mount-drives.py (GitHub)



#!/usr/bin/python3
import os
import subprocess
# script assumes running as root
# list of system drives to not attempt to mount
ignore_drives = ("7a907b0c-4f8a-4350-b2b4-c804abca9622", "ed5b6085-40e0-463e-b2fa-438f6cb9ddcb", "6d406c55-5de8-4401-b2ee-92bd1046520c", "80b7f420-7699-476c-9db1-c0108bb661aa")


def main():

    check_disks = [f for f in os.listdir('/dev/disk/by-uuid') if f not in ignore_drives]
    print("drives to mount: ", check_disks)
    try:
        os.mkdir("./mounts")
    except FileExistsError:
        print('./mount exists')

    print("cleaning up mounts folder")
    clean_mounts = os.listdir("./mounts")
    for f in clean_mounts:
        subprocess.run('umount ./mounts/{}'.format(f), shell=True)
        subprocess.run("rmdir ./mounts/{}".format(f), shell=True)
        print('unmounted ' + f)
    mounts = subprocess.getoutput('mount')
    print("mounting disks")
    for f in check_disks:
        try:
            os.mkdir("./mounts/" + f)
        except FileExistsError:
            print('folder exists: ' + f)
        if not 'mounts/{}'.format(f) in mounts:
            subprocess.run('mount -o ro /dev/disk/by-uuid/{} ./mounts/{}'.format(f, f), shell=True)
            print('mounted: ' + f)
        else:
            print('folder already mounted: ' + f)
...
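mount-drives.py builds the same check_disks list of UUIDs and then mounts each remaining drive read-only under ./mounts before it is scanned. The sketch below reproduces that filter step on a Linux host but only prints the mount commands instead of executing them, so it can be run without root; IGNORE_DRIVES and MOUNT_ROOT are placeholder values you would replace with your own system drives and mount directory.

#!/usr/bin/python3
# Sketch of the filter-and-mount flow from mount-drives.py. It prints the
# commands rather than running them, so no root access is required.
import os
import shlex

IGNORE_DRIVES = {"placeholder-system-uuid-1", "placeholder-system-uuid-2"}
BY_UUID = "/dev/disk/by-uuid"
MOUNT_ROOT = "./mounts"


def planned_mount_commands():
    """Build a read-only mount command for every non-system drive."""
    check_disks = [u for u in os.listdir(BY_UUID) if u not in IGNORE_DRIVES]
    commands = []
    for uuid in check_disks:
        device = os.path.join(BY_UUID, uuid)
        mountpoint = os.path.join(MOUNT_ROOT, uuid)
        # The real script first creates ./mounts/<uuid> and skips drives that
        # are already mounted; -o ro keeps the mount read-only.
        commands.append(
            "mount -o ro {} {}".format(shlex.quote(device), shlex.quote(mountpoint))
        )
    return commands


if __name__ == "__main__":
    for cmd in planned_mount_commands():
        print(cmd)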


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run tempest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing for free!

