How to use the hyperv_remove_nested_vm method in LISA

The Python snippets below come from the LISA (lisa_python) sources: nestedperf.py shows how hyperv_remove_nested_vm is called to tear down nested (L2) guests after a test, and common.py shows how the function is defined.
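Before the full excerpts, here is a minimal sketch of the typical call pattern: create the nested guest, run the workload, and always remove the guest in a finally block, marking the host dirty if cleanup fails. This is a sketch rather than LISA source: the run_workload_on_nested_vm wrapper is hypothetical, and the import path is assumed from the repository layout (microsoft/testsuites/nested/common.py).

# Minimal sketch, assuming `host` is a RemoteNode running Windows with Hyper-V
# enabled and `variables` holds the nested-image settings from the runbook.
# The import path below is an assumption based on the repository layout.
from microsoft.testsuites.nested.common import (
    hyperv_connect_nested_vm,
    hyperv_remove_nested_vm,
    parse_nested_image_variables,
)

def run_workload_on_nested_vm(host, variables, log):  # hypothetical helper
    username, password, port, image_url = parse_nested_image_variables(variables)
    try:
        # boots the nested guest and returns it as a connectable RemoteNode
        nested_vm = hyperv_connect_nested_vm(host, username, password, port, image_url)
        # ... run the actual workload against nested_vm here ...
    finally:
        try:
            # removes the default "L2-VM" guest and its copied image file
            hyperv_remove_nested_vm(host)
        except Exception as e:
            log.debug(f"Failed to clean up nested vm: {e}")
            host.mark_dirty()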

nestedperf.py

Source: nestedperf.py on GitHub


...
            )
        finally:
            # cleanup server
            try:
                hyperv_remove_nested_vm(server_l1, "server_l2")
            except Exception as e:
                log.debug(f"Failed to clean up server vm: {e}")
                server_l1.mark_dirty()
            # cleanup client
            try:
                hyperv_remove_nested_vm(client_l1, "client_l2")
            except Exception as e:
                log.debug(f"Failed to clean up client vm: {e}")
                client_l1.mark_dirty()

    @TestCaseMetadata(
        description="""
        This script runs netperf test on two nested VMs on different L1 guests
        connected with NAT
        """,
        priority=3,
        timeout=_TIME_OUT,
        requirement=simple_requirement(
            min_count=2,
            network_interface=schema.NetworkInterfaceOptionSettings(
                nic_count=search_space.IntRange(min=2),
            ),
            disk=schema.DiskOptionSettings(
                data_disk_count=search_space.IntRange(min=1),
                data_disk_size=search_space.IntRange(min=12),
            ),
        ),
    )
    def perf_nested_kvm_netperf_pps_nat(
        self,
        result: TestResult,
        variables: Dict[str, Any],
        log: Logger,
    ) -> None:
        environment = result.environment
        assert environment, "fail to get environment from testresult"
        server_l1 = cast(RemoteNode, environment.nodes[0])
        client_l1 = cast(RemoteNode, environment.nodes[1])
        # parse nested image variables
        (
            nested_image_username,
            nested_image_password,
            _,
            nested_image_url,
        ) = parse_nested_image_variables(variables)
        try:
            # setup nested vm on server in NAT configuration
            server_l2 = self._linux_setup_nat(
                node=server_l1,
                nested_vm_name="server_l2",
                guest_username=nested_image_username,
                guest_password=nested_image_password,
                guest_port=self._SERVER_HOST_FWD_PORT,
                guest_image_url=nested_image_url,
                guest_internal_ip=self._SERVER_IP_ADDR,
                guest_default_nic=self._NIC_NAME,
                bridge_name=self._BR_NAME,
                bridge_network=self._BR_NETWORK,
                bridge_cidr=self._BR_CIDR,
                bridge_gateway=self._BR_GATEWAY,
            )
            # setup nested vm on client in NAT configuration
            client_l2 = self._linux_setup_nat(
                node=client_l1,
                nested_vm_name="client_l2",
                guest_username=nested_image_username,
                guest_password=nested_image_password,
                guest_port=self._CLIENT_HOST_FWD_PORT,
                guest_image_url=nested_image_url,
                guest_internal_ip=self._CLIENT_IP_ADDR,
                guest_default_nic=self._NIC_NAME,
                bridge_name=self._BR_NAME,
                bridge_network=self._BR_NETWORK,
                bridge_cidr=self._BR_CIDR,
                bridge_gateway=self._BR_GATEWAY,
            )
            # run netperf test
            perf_tcp_pps(result, "singlepps", server_l2, client_l2)
        finally:
            self._linux_cleanup_nat(server_l1, self._BR_NAME, log)
            self._linux_cleanup_nat(client_l1, self._BR_NAME, log)

    def _linux_setup_nat(
        self,
        node: RemoteNode,
        nested_vm_name: str,
        guest_username: str,
        guest_password: str,
        guest_port: int,
        guest_image_url: str,
        guest_internal_ip: str,
        guest_default_nic: str,
        bridge_name: str,
        bridge_network: str,
        bridge_cidr: str,
        bridge_gateway: str,
    ) -> RemoteNode:
        """
        Setup NAT on the node with following configurations:
        1. Forward traffic on node's eth0 interface and port `guest_port`
        to the nested VM's port 22.
        2. Forward all traffic on node's eth1 interface to the nested VM.
        """
        # get core count
        core_count = node.tools[Lscpu].get_core_count()
        node_eth1_ip = node.nics.get_nic("eth1").ip_addr
        bridge_dhcp_range = f"{guest_internal_ip},{guest_internal_ip}"
        # enable ip forwarding
        node.tools[Sysctl].write("net.ipv4.ip_forward", "1")
        # setup bridge
        node.tools[Ip].setup_bridge(bridge_name, f"{bridge_gateway}/{bridge_cidr}")
        node.tools[Ip].set_bridge_configuration(bridge_name, "stp_state", "0")
        node.tools[Ip].set_bridge_configuration(bridge_name, "forward_delay", "0")
        # reset bridge lease file to remove old dns leases
        node.execute(
            f"cp /dev/null /var/run/qemu-dnsmasq-{bridge_name}.leases", sudo=True
        )
        # start dnsmasq
        node.tools[Dnsmasq].start(bridge_name, bridge_gateway, bridge_dhcp_range)
        # reset filter table to accept all traffic
        node.tools[Iptables].reset_table()
        # reset nat table and setup nat forwarding
        node.tools[Iptables].reset_table("nat")
        node.tools[Iptables].run(
            f"-t nat -A POSTROUTING -s {bridge_network}/{bridge_cidr} -j MASQUERADE",
            sudo=True,
            force_run=True,
        )
        # start nested vm
        nested_vm = qemu_connect_nested_vm(
            node,
            guest_username,
            guest_password,
            guest_port,
            guest_image_url,
            taps=1,
            cores=core_count,
            bridge=bridge_name,
            stop_existing_vm=True,
            name=nested_vm_name,
        )
        # configure rc.local to run dhclient on reboot
        nested_vm.tools[StartConfiguration].add_command("ip link set dev ens4 up")
        nested_vm.tools[StartConfiguration].add_command("dhclient ens4")
        # reboot nested vm and close ssh connection
        nested_vm.execute("reboot", sudo=True)
        nested_vm.close()
        # route traffic on `eth0` and port `guest_port` on l1 vm to
        # port 22 on l2 vm
        node.tools[Iptables].run(
            f"-t nat -A PREROUTING -i eth0 -p tcp --dport {guest_port} "
            f"-j DNAT --to {guest_internal_ip}:22",
            sudo=True,
            force_run=True,
        )
        # route all tcp traffic on `eth1` port on l1 vm to l2 vm
        node.tools[Iptables].run(
            f"-t nat -A PREROUTING -i eth1 -d {node_eth1_ip} "
            f"-p tcp -j DNAT --to {guest_internal_ip}",
            sudo=True,
            force_run=True,
        )
        # wait till nested vm is up
        try_connect(
            schema.ConnectionInfo(
                address=node.public_address,
                port=guest_port,
                username=guest_username,
                password=guest_password,
            )
        )
        # set default nic interfaces on l2 vm
        nested_vm.internal_address = node_eth1_ip
        nested_vm.capability.network_interface = Synthetic()
        return nested_vm

    def _linux_cleanup_nat(
        self,
        node: RemoteNode,
        bridge_name: str,
        log: Logger,
    ) -> None:
        try:
            # stop running QEMU instances
            node.tools[Qemu].delete_vm()
            # clear bridge and taps
            node.tools[Ip].delete_interface(bridge_name)
            # flush ip tables
            node.tools[Iptables].reset_table()
            node.tools[Iptables].reset_table("nat")
        except Exception as e:
            log.debug(f"Failed to clean up NAT configuration: {e}")
            node.mark_dirty()

    def _storage_perf_qemu(
        self,
        node: RemoteNode,
        result: TestResult,
        variables: Dict[str, Any],
        log: Logger,
        filename: str = "/dev/sdb",
        start_iodepth: int = 1,
        max_iodepth: int = 1024,
        setup_raid: bool = True,
    ) -> None:
        (
            nested_image_username,
            nested_image_password,
            nested_image_port,
            nested_image_url,
        ) = parse_nested_image_variables(variables)
        # get data disks and remove disk we will use for downloading
        # nested image
        l1_data_disks = node.features[Disk].get_raw_data_disks()
        log.debug(f"l1_data_disks: {l1_data_disks}")
        image_download_location = node.find_partition_with_freespace(
            NESTED_VM_REQUIRED_DISK_SIZE_IN_GB
        )
        image_download_disk = (
            node.tools[Lsblk]
            .find_disk_by_mountpoint(image_download_location, force_run=True)
            .device_name
        )
        log.debug(f"image_download_disk: {image_download_disk}")
        if image_download_disk in l1_data_disks:
            l1_data_disks.remove(image_download_disk)
        l1_data_disk_count = len(l1_data_disks)
        try:
            # setup raid on l1 data disks
            if setup_raid:
                disks = ["/dev/md0"]
                l1_partition_disks = reset_partitions(node, l1_data_disks)
                stop_raid(node)
                reset_raid(node, l1_partition_disks)
            else:
                disks = [l1_data_disks[0]]
            # get l2 vm
            l2_vm = qemu_connect_nested_vm(
                node,
                nested_image_username,
                nested_image_password,
                nested_image_port,
                nested_image_url,
                disks=disks,
            )
            l2_vm.capability.network_interface = Synthetic()
            # Qemu command exits immediately but the VM requires some time to boot up.
            l2_vm.tools[Lscpu].get_core_count()
            # Each fio process start jobs equal to the iodepth to read/write from
            # the disks. The max number of jobs can be equal to the core count of
            # the node.
            # Examples:
            # iodepth = 4, core count = 8 => max_jobs = 4
            # iodepth = 16, core count = 8 => max_jobs = 8
            num_jobs = []
            iodepth_iter = start_iodepth
            core_count = node.tools[Lscpu].get_core_count()
            while iodepth_iter <= max_iodepth:
                num_jobs.append(min(iodepth_iter, core_count))
                iodepth_iter = iodepth_iter * 2
            # Run fio test
            # The added disks appear as /dev/sdb on the nested vm
            perf_disk(
                l2_vm,
                start_iodepth,
                max_iodepth,
                filename,
                test_name=inspect.stack()[1][3],
                core_count=core_count,
                disk_count=l1_data_disk_count,
                disk_setup_type=DiskSetupType.raid0,
                disk_type=DiskType.premiumssd,
                test_result=result,
                num_jobs=num_jobs,
                size_mb=8192,
                overwrite=True,
            )
        finally:
            try:
                node.tools[Qemu].delete_vm()
                stop_raid(node)
            except Exception as e:
                log.debug(f"Failed to cleanup Qemu VM: {e}")
                node.mark_dirty()

    def _storage_perf_hyperv(
        self,
        node: RemoteNode,
        test_result: TestResult,
        variables: Dict[str, Any],
        log: Logger,
        filename: str = "/dev/sdb",
        start_iodepth: int = 1,
        max_iodepth: int = 1024,
        setup_raid: bool = False,
    ) -> None:
        (
            nested_image_username,
            nested_image_password,
            nested_image_port,
            nested_image_url,
        ) = parse_nested_image_variables(variables)
        mdadm = node.tools[Mdadm]
        try:
            # cleanup any previous raid configurations to free
            # data disks
            mdadm.stop_raid()
            # get data disk id
            powershell = node.tools[PowerShell]
            data_disks_id_str = powershell.run_cmdlet(
                "(Get-Disk | "
                "Where-Object {$_.FriendlyName -eq 'Msft Virtual Disk'}).Number"
            )
            data_disks_id = data_disks_id_str.strip().replace("\r", "").split("\n")
            # set data disks offline
            for disk in data_disks_id:
                powershell.run_cmdlet(
                    f"Set-Disk -Number {disk} -IsOffline $true", force_run=True
                )
            # create raid
            if setup_raid:
                mdadm.create_raid(data_disks_id)
            # get l2 vm
            nested_vm = hyperv_connect_nested_vm(
                node,
                nested_image_username,
                nested_image_password,
                nested_image_port,
                nested_image_url,
            )
            # Each fio process start jobs equal to the iodepth to read/write from
            # the disks. The max number of jobs can be equal to the core count of
            # the node.
            # Examples:
            # iodepth = 4, core count = 8 => max_jobs = 4
            # iodepth = 16, core count = 8 => max_jobs = 8
            num_jobs = []
            iodepth_iter = start_iodepth
            core_count = node.tools[Lscpu].get_core_count()
            while iodepth_iter <= max_iodepth:
                num_jobs.append(min(iodepth_iter, core_count))
                iodepth_iter = iodepth_iter * 2
            # run fio test
            perf_disk(
                nested_vm,
                start_iodepth,
                max_iodepth,
                filename,
                test_name=inspect.stack()[1][3],
                core_count=core_count,
                disk_count=1,
                disk_setup_type=DiskSetupType.raid0,
                disk_type=DiskType.premiumssd,
                test_result=test_result,
                num_jobs=num_jobs,
                size_mb=8192,
                overwrite=True,
            )
        finally:
            try:
                hyperv_remove_nested_vm(node)
                node.tools[Mdadm].stop_raid()
            except Exception as e:
                log.debug(f"Failed to cleanup Hyper-V vm: {e}")
                node.mark_dirty()

    def _windows_setup_nat(
        self,
        node: RemoteNode,
        nested_vm_name: str,
        guest_username: str,
        guest_password: str,
        guest_port: int,
        guest_image_url: str,
    ) -> RemoteNode:
        nested_vm = hyperv_connect_nested_vm(...
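One detail of the storage tests above that is easy to misread is the num_jobs calculation: the tested iodepth doubles from start_iodepth up to max_iodepth, and each fio run uses min(iodepth, core_count) jobs. Below is a standalone sketch of that loop; the values start_iodepth=1, max_iodepth=1024 and core_count=8 are illustrative assumptions, not taken from a real run.

# Reproduces the num_jobs loop from _storage_perf_qemu/_storage_perf_hyperv
# above, with assumed example values.
start_iodepth, max_iodepth, core_count = 1, 1024, 8

num_jobs = []
iodepth_iter = start_iodepth
while iodepth_iter <= max_iodepth:
    # each fio run uses at most one job per core
    num_jobs.append(min(iodepth_iter, core_count))
    iodepth_iter = iodepth_iter * 2

print(num_jobs)  # [1, 2, 4, 8, 8, 8, 8, 8, 8, 8, 8]  (iodepths 1, 2, 4, ..., 1024)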


common.py

Source: common.py on GitHub


...
    nested_vm.capability.network_interface = Synthetic()
    # wait for nested vm ssh connection to be ready
    try_connect(connection_info)
    return nested_vm


def hyperv_remove_nested_vm(
    host: RemoteNode,
    name: str = "L2-VM",
    image_name: str = HYPERV_NESTED_VM_IMAGE_NAME,
    switch_name: str = "nestedvmswitch",
    nat_name: str = "nestedvmnat",
) -> None:
    image_name = f"{name}_{image_name}"
    file_path = f"{HYPER_IMAGE_FOLDER}\\{image_name}"
    hyperv = host.tools[HyperV]
    # Delete VM
    hyperv.delete_vm(name)
    # delete image
    host.tools[Rm].remove_file(file_path)
    # delete nat network...
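The two call patterns seen in nestedperf.py line up with this definition. A short sketch follows; `node`, `server_l1` and `client_l1` are placeholders for RemoteNode objects in your own test case, and the import path is the same assumption as in the first sketch above.

# Sketch only: the three node arguments are placeholder RemoteNodes.
from microsoft.testsuites.nested.common import hyperv_remove_nested_vm

def cleanup_nested_guests(node, server_l1, client_l1):
    # Default call: deletes the Hyper-V VM named "L2-VM" and its copied image
    # "L2-VM_<HYPERV_NESTED_VM_IMAGE_NAME>" under HYPER_IMAGE_FOLDER, as in
    # _storage_perf_hyperv above.
    hyperv_remove_nested_vm(node)

    # Named guests: matches the netperf cleanup above, where separate
    # "server_l2" and "client_l2" guests were created on two L1 hosts.
    hyperv_remove_nested_vm(server_l1, "server_l2")
    hyperv_remove_nested_vm(client_l1, "client_l2")

The switch_name and nat_name defaults suggest the function also tears down the per-guest switch and NAT network, but that part of common.py is truncated in the excerpt above.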

