How to use _get_node_ip_address method in lisa

Best Python code snippet using lisa_python

platform.py

Source:platform.py Github

copy

Full Screen

...500 address = conn_info[constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS]501 for node in environment.nodes.list():502 assert isinstance(node, RemoteNode)503 # Get the VM's IP address.504 local_address = self._get_node_ip_address(505 environment, log, lv_conn, node, timeout506 )507 node_port = 22508 if self.host_node.is_remote:509 with self._port_forwarding_lock:510 port_not_found = True511 while port_not_found:512 if self._next_available_port > 65535:513 raise LisaException(514 "No available ports on the host to forward"515 )516 # check if the port is already in use517 output = self.host_node.execute(518 f"nc -vz 127.0.0.1 {self._next_available_port}"519 )520 if output.exit_code == 1: # port not in use521 node_port = self._next_available_port522 port_not_found = False523 self._next_available_port += 1524 self.host_node.tools[Iptables].start_forwarding(525 node_port, local_address, 22526 )527 environment_context.port_forwarding_list.append(528 (node_port, local_address)529 )530 else:531 address = local_address532 # Set SSH connection info for the node.533 node.set_connection_info(534 address=local_address,535 public_address=address,536 public_port=node_port,537 username=self.runbook.admin_username,538 private_key_file=self.runbook.admin_private_key_file,539 )540 # Ensure cloud-init completes its setup.541 node.execute(542 "cloud-init status --wait",543 sudo=True,544 expected_exit_code=0,545 expected_exit_code_failure_message="waiting on cloud-init",546 )547 # Create a cloud-init ISO for a VM.548 def _create_node_cloud_init_iso(549 self, environment: Environment, log: Logger, node: Node550 ) -> None:551 environment_context = get_environment_context(environment)552 node_context = get_node_context(node)553 user_data = {554 "users": [555 "default",556 {557 "name": self.runbook.admin_username,558 "shell": "/bin/bash",559 "sudo": ["ALL=(ALL) NOPASSWD:ALL"],560 "groups": ["sudo", "docker"],561 "ssh_authorized_keys": [environment_context.ssh_public_key],562 },563 ],564 }565 # 
Iterate through all the top-level properties.566 for extra_user_data in node_context.extra_cloud_init_user_data:567 for key, value in extra_user_data.items():568 existing_value = user_data.get(key)569 if not existing_value:570 # Property doesn't exist yet. So, add it.571 user_data[key] = value572 elif isinstance(existing_value, dict) and isinstance(value, dict):573 # Merge two dictionaries by adding properties from new value and574 # replacing any existing properties.575 # Examples: disk_setup, etc.576 existing_value.update(value)577 elif isinstance(existing_value, list) and isinstance(value, list):578 # Merge two lists by appending to the end of the existing list.579 # Examples: write_files, runcmd, etc.580 existing_value.extend(value)581 else:582 # String, unknown type or mismatched type.583 # Just replace the existing property.584 user_data[key] = value585 meta_data = {586 "local-hostname": node_context.vm_name,587 }588 # Note: cloud-init requires the user-data file to be prefixed with589 # `#cloud-config`.590 user_data_string = "#cloud-config\n" + yaml.safe_dump(user_data)591 meta_data_string = yaml.safe_dump(meta_data)592 iso_path = node_context.cloud_init_file_path593 tmp_dir = tempfile.TemporaryDirectory()594 try:595 iso_path = os.path.join(tmp_dir.name, "cloud-init.iso")596 self._create_iso(597 iso_path,598 [("/user-data", user_data_string), ("/meta-data", meta_data_string)],599 )600 self.host_node.shell.copy(601 Path(iso_path), Path(node_context.cloud_init_file_path)602 )603 finally:604 tmp_dir.cleanup()605 # Create an ISO file.606 def _create_iso(self, file_path: str, files: List[Tuple[str, str]]) -> None:607 iso = pycdlib.PyCdlib()608 iso.new(joliet=3, vol_ident="cidata")609 for i, file in enumerate(files):610 path, contents = file611 contents_data = contents.encode()612 iso.add_fp(613 io.BytesIO(contents_data),614 len(contents_data),615 f"/{i}.;1",616 joliet_path=path,617 )618 iso.write(file_path)619 # Create the OS disk.620 def _create_node_os_disk(621 
self, environment: Environment, log: Logger, node: Node622 ) -> None:623 raise NotImplementedError()624 def _create_node_data_disks(self, node: Node) -> None:625 node_context = get_node_context(node)626 qemu_img = self.host_node.tools[QemuImg]627 for disk in node_context.data_disks:628 qemu_img.create_new_qcow2(disk.file_path, disk.size_gib * 1024)629 # Create the XML definition for the VM.630 def _create_node_domain_xml(631 self,632 environment: Environment,633 log: Logger,634 node: Node,635 lv_conn: libvirt.virConnect,636 ) -> str:637 node_context = get_node_context(node)638 domain = ET.Element("domain")639 domain.attrib["type"] = "kvm"640 name = ET.SubElement(domain, "name")641 name.text = node_context.vm_name642 memory = ET.SubElement(domain, "memory")643 memory.attrib["unit"] = "MiB"644 assert isinstance(node.capability.memory_mb, int)645 memory.text = str(node.capability.memory_mb)646 vcpu = ET.SubElement(domain, "vcpu")647 assert isinstance(node.capability.core_count, int)648 vcpu.text = str(node.capability.core_count)649 os_tag = ET.SubElement(domain, "os")650 os_type = ET.SubElement(os_tag, "type")651 os_type.text = "hvm"652 if node_context.machine_type:653 os_type.attrib["machine"] = node_context.machine_type654 if not node_context.use_bios_firmware:655 # In an ideal world, we would use libvirt's firmware auto-selection feature.656 # Unfortunatley, it isn't possible to specify the secure-boot state until657 # libvirt v7.2.0 and Ubuntu 20.04 only has libvirt v6.0.0. 
Therefore, we658 # have to select the firmware manually.659 firmware_config = self._get_firmware_config(660 lv_conn, node_context.machine_type, node_context.enable_secure_boot661 )662 print(firmware_config)663 loader = ET.SubElement(os_tag, "loader")664 loader.attrib["readonly"] = "yes"665 loader.attrib["type"] = "pflash"666 loader.attrib["secure"] = "yes" if node_context.enable_secure_boot else "no"667 loader.text = firmware_config["mapping"]["executable"]["filename"]668 nvram = ET.SubElement(os_tag, "nvram")669 nvram.attrib["template"] = firmware_config["mapping"]["nvram-template"][670 "filename"671 ]672 features = ET.SubElement(domain, "features")673 ET.SubElement(features, "acpi")674 ET.SubElement(features, "apic")675 cpu = ET.SubElement(domain, "cpu")676 cpu.attrib["mode"] = "host-passthrough"677 clock = ET.SubElement(domain, "clock")678 clock.attrib["offset"] = "utc"679 on_poweroff = ET.SubElement(domain, "on_poweroff")680 on_poweroff.text = "destroy"681 on_reboot = ET.SubElement(domain, "on_reboot")682 on_reboot.text = "restart"683 on_crash = ET.SubElement(domain, "on_crash")684 on_crash.text = "destroy"685 devices = ET.SubElement(domain, "devices")686 serial = ET.SubElement(devices, "serial")687 serial.attrib["type"] = "pty"688 serial_target = ET.SubElement(serial, "target")689 serial_target.attrib["type"] = "isa-serial"690 serial_target.attrib["port"] = "0"691 serial_target_model = ET.SubElement(serial_target, "model")692 serial_target_model.attrib["name"] = "isa-serial"693 console = ET.SubElement(devices, "console")694 console.attrib["type"] = "pty"695 console_target = ET.SubElement(console, "target")696 console_target.attrib["type"] = "serial"697 console_target.attrib["port"] = "0"698 video = ET.SubElement(devices, "video")699 video_model = ET.SubElement(video, "model")700 if isinstance(self.host_node.os, CBLMariner):701 video_model.attrib["type"] = "vga"702 else:703 video_model.attrib["type"] = "qxl"704 graphics = ET.SubElement(devices, "graphics")705 
graphics.attrib["type"] = "spice"706 network_interface = ET.SubElement(devices, "interface")707 network_interface.attrib["type"] = "network"708 network_interface_source = ET.SubElement(network_interface, "source")709 network_interface_source.attrib["network"] = "default"710 network_interface_model = ET.SubElement(network_interface, "model")711 network_interface_model.attrib["type"] = "virtio"712 self._add_disk_xml(713 node_context,714 devices,715 node_context.cloud_init_file_path,716 "cdrom",717 "raw",718 "sata",719 )720 self._add_disk_xml(721 node_context,722 devices,723 node_context.os_disk_file_path,724 "disk",725 "qcow2",726 "virtio",727 )728 for data_disk in node_context.data_disks:729 self._add_disk_xml(730 node_context,731 devices,732 data_disk.file_path,733 "disk",734 "qcow2",735 "virtio",736 )737 xml = ET.tostring(domain, "unicode")738 return xml739 def _add_disk_xml(740 self,741 node_context: NodeContext,742 devices: ET.Element,743 file_path: str,744 device_type: str,745 image_type: str,746 bus_type: str,747 ) -> None:748 device_name = self._new_disk_device_name(node_context)749 disk = ET.SubElement(devices, "disk")750 disk.attrib["type"] = "file"751 disk.attrib["device"] = device_type752 disk_driver = ET.SubElement(disk, "driver")753 disk_driver.attrib["name"] = "qemu"754 disk_driver.attrib["type"] = image_type755 disk_target = ET.SubElement(disk, "target")756 disk_target.attrib["dev"] = device_name757 disk_target.attrib["bus"] = bus_type758 disk_source = ET.SubElement(disk, "source")759 disk_source.attrib["file"] = file_path760 def _add_virtio_disk_xml(761 self,762 node_context: NodeContext,763 devices: ET.Element,764 file_path: str,765 queues: int,766 ) -> None:767 device_name = self._new_disk_device_name(node_context, True)768 disk = ET.SubElement(devices, "disk")769 disk.attrib["type"] = "file"770 disk_driver = ET.SubElement(disk, "driver")771 disk_driver.attrib["if"] = "virtio"772 disk_driver.attrib["type"] = "raw"773 disk_driver.attrib["queues"] = 
str(queues)774 disk_target = ET.SubElement(disk, "target")775 disk_target.attrib["dev"] = device_name776 disk_source = ET.SubElement(disk, "source")777 disk_source.attrib["file"] = file_path778 def _new_disk_device_name(779 self,780 node_context: NodeContext,781 is_paravirtualized: bool = False,782 ) -> str:783 disk_index = node_context.next_disk_index784 node_context.next_disk_index += 1785 device_name = self._get_disk_device_name(disk_index, is_paravirtualized)786 return device_name787 def _get_disk_device_name(788 self, disk_index: int, is_paravirtualized: bool = False789 ) -> str:790 # The disk device name is required to follow the standard Linux device naming791 # scheme. That is: [ sda, sdb, ..., sdz, sdaa, sdab, ... ]. However, it is792 # unlikely that someone will ever need more than 26 disks. So, keep is simple793 # for now.794 if disk_index < 0 or disk_index > 25:795 raise LisaException(f"Unsupported disk index: {disk_index}.")796 prefix = "v" if is_paravirtualized else "s"797 suffix = chr(ord("a") + disk_index)798 return f"{prefix}d{suffix}"799 # Wait for the VM to boot and then get the IP address.800 def _get_node_ip_address(801 self,802 environment: Environment,803 log: Logger,804 lv_conn: libvirt.virConnect,805 node: Node,806 timeout: float,807 ) -> str:808 node_context = get_node_context(node)809 while True:810 addr = self._try_get_node_ip_address(environment, log, lv_conn, node)811 if addr:812 return addr813 if time.time() > timeout:814 raise LisaException(f"no IP addresses found for {node_context.vm_name}")815 # Try to get the IP address of the VM.816 def _try_get_node_ip_address(817 self,818 environment: Environment,819 log: Logger,820 lv_conn: libvirt.virConnect,821 node: Node,822 ) -> Optional[str]:823 node_context = get_node_context(node)824 domain = lv_conn.lookupByName(node_context.vm_name)825 # Acquire IP address from libvirt's DHCP server.826 interfaces = domain.interfaceAddresses(827 libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE828 )829 
if len(interfaces) < 1:830 return None...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful