Best Python code snippet using lisa_python
dpdktestpmd.py
Source:dpdktestpmd.py  
...131                "to querying the version information."132            )133        # black doesn't like to direct return VersionInfo comparison134        return bool(dpdk_version >= "19.11.0")  # appease the type checker135    def use_package_manager_install(self) -> bool:136        assert_that(hasattr(self, "_dpdk_source")).described_as(137            "_dpdk_source was not set in DpdkTestpmd instance. "138            "set_dpdk_source must be called before instantiation."139        ).is_true()140        if self._dpdk_source == PACKAGE_MANAGER_SOURCE:141            return True142        else:143            return False144    def set_version_info_from_source_install(145        self, branch_identifier: str, matcher: Pattern[str]146    ) -> None:147        match = matcher.search(branch_identifier)148        if not match or not match.group("major") or not match.group("minor"):149            fail(150                f"Could not determine dpdk version info from '{self._dpdk_source}'"151                f" with id: '{branch_identifier}' using regex: '{matcher.pattern}'"152            )153        else:154            major, minor = map(int, [match.group("major"), match.group("minor")])155            self._dpdk_version_info: VersionInfo = VersionInfo(major, minor)156    def generate_testpmd_include(157        self, node_nic: NicInfo, vdev_id: int, use_max_nics: bool = False158    ) -> str:159        # handle generating different flags for pmds/device combos for testpmd160        # identify which nics to inlude in test, exclude others161        # TODO: Test does not make full use of multiple nics yet.162        if use_max_nics:163            self.node.log.warn(164                "NOTE: Testpmd suite does not yet make full use of multiple nics"165            )166        # if use_max_nics:167        #     ssh_nic = self.node.nics.get_nic(self.node.nics.default_nic)168        #     include_nics = [169        #         self.node.nics.get_nic(nic)170        #         for nic in self.node.nics.get_upper_nics()171        #         if nic != ssh_nic.upper172        #     ]173        #     exclude_nics = [ssh_nic]174        # else:175        include_nics = [node_nic]176        exclude_nics = [177            self.node.nics.get_nic(nic)178            for nic in self.node.nics.get_upper_nics()179            if nic != node_nic.upper180        ]181        # build list of vdev info flags for each nic182        vdev_info = ""183        self.node.log.info(f"Running test with {len(include_nics)} nics.")184        for nic in include_nics:185            if self._dpdk_version_info and self._dpdk_version_info >= "18.11.0":186                vdev_name = "net_vdev_netvsc"187                vdev_flags = f"iface={nic.upper},force=1"188            else:189                vdev_name = "net_failsafe"190                vdev_flags = (191                    f"dev({nic.pci_slot}),dev(net_tap0,iface={nic.upper},force=1)"192                )193            if nic.bound_driver == "hv_netvsc":194                vdev_info += f'--vdev="{vdev_name}{vdev_id},{vdev_flags}" '195            elif nic.bound_driver == "uio_hv_generic":196                pass197            else:198                fail(199                    (200                        f"Unknown driver({nic.bound_driver}) bound to "201                        f"{nic.upper}/{nic.lower}."202                        "Cannot generate testpmd include arguments."203                    )204                )205        # exclude pci slots not associated with the test nic206        exclude_flags = ""207        for nic in exclude_nics:208            exclude_flags += f' -b "{nic.pci_slot}"'209        return vdev_info + exclude_flags210    def _calculate_core_count(211        self,212        cores_available: int,213        txq: int,214        rxq: int,215        use_max_nics: bool,216        service_cores: int = 1,217    ) -> int:218        # Use either:219        #   - as many cores as are available, minus a core for the system220        #   - 1 per queue on each nic + one per NIC PMD221        # this is a no-op for now,222        # test does not correctly handle multiple nics yet223        if use_max_nics:224            pass225        nics_available = 1226        return min(227            cores_available - 1,228            (nics_available * (txq + rxq)) + (nics_available * service_cores),229        )230    def generate_testpmd_command(231        self,232        nic_to_include: NicInfo,233        vdev_id: int,234        mode: str,235        pmd: str,236        extra_args: str = "",237        txq: int = 0,238        rxq: int = 0,239        service_cores: int = 1,240        use_max_nics: bool = False,241    ) -> str:242        #   testpmd \243        #   -l <core-list> \244        #   -n <num of mem channels> \245        #   -w <pci address of the device you plan to use> \246        #   --vdev="net_vdev_netvsc<id>,iface=<the iface to attach to>" \247        #   -- --port-topology=chained \248        #   --nb-cores <number of cores to use for test pmd> \249        #   --forward-mode=txonly \250        #   --eth-peer=<port id>,<receiver peer MAC address> \251        #   --stats-period <display interval in seconds>252        # if test asks for multicore, it implies using more than one nic253        # otherwise default core count for single nic will be used254        # and then adjusted for queue count255        if not (rxq or txq):256            txq = 1257            rxq = 1258        # calculate the available cores per numa node, infer the offset259        # required for core selection argument260        cores_available = self.node.tools[Lscpu].get_core_count()261        numa_node_count = self.node.tools[Lscpu].get_numa_node_count()262        nic_numa_node = self.node.nics._get_nic_numa_node(nic_to_include.lower)263        cores_per_numa = cores_available // numa_node_count264        numa_core_offset = cores_per_numa * nic_numa_node265        # calculate how many cores to use based on txq/rxq per nic and how many nics266        use_core_count = self._calculate_core_count(267            cores_per_numa, txq, rxq, use_max_nics, service_cores=service_cores268        )269        nic_include_info = self.generate_testpmd_include(270            nic_to_include, vdev_id, use_max_nics271        )272        # set up queue arguments273        if txq or rxq:274            # set number of queues to use for txq and rxq (per nic, default is 1)275            assert_that(txq).described_as(276                "TX queue value must be greater than 0 if txq is used"277            ).is_greater_than(0)278            assert_that(rxq).described_as(279                "RX queue value must be greater than 0 if rxq is used"280            ).is_greater_than(0)281            extra_args += f" --txq={txq} --rxq={rxq}  "282        assert_that(use_core_count).described_as(283            "Selection asked for more cores than were available for numa "284            f"{nic_numa_node}. Requested {use_core_count}"285        ).is_less_than_or_equal_to(cores_per_numa)286        # use the selected amount of cores, adjusting for 0 index.287        core_args = f"-l {numa_core_offset}-{numa_core_offset + use_core_count-1}"288        return (289            f"{self._testpmd_install_path} {core_args} -n 4 --proc-type=primary "290            f"{nic_include_info} -- --forward-mode={mode} {extra_args} "291            "-a --stats-period 2 --port-topology=chained"292        )293    def run_for_n_seconds(self, cmd: str, timeout: int) -> str:294        self._last_run_timeout = timeout295        self.node.log.info(f"{self.node.name} running: {cmd}")296        proc_result = self.node.tools[Timeout].run_with_timeout(297            cmd, timeout, SIGINT, kill_timeout=timeout + 10298        )299        self._last_run_output = proc_result.stdout300        self.populate_performance_data()301        return proc_result.stdout302    def check_testpmd_is_running(self) -> bool:303        pids = self.node.tools[Pidof].get_pids(self.command, sudo=True)304        return len(pids) > 0305    def kill_previous_testpmd_command(self) -> None:306        # kill testpmd early307        self.node.tools[Kill].by_name(self.command, ignore_not_exist=True)308        if self.check_testpmd_is_running():309            self.node.log.debug(310                "Testpmd is not responding to signals, "311                "attempt network connection reset."312            )313            # reset node connections (quicker and less risky than netvsc reset)314            self.node.close()315            if not self.check_testpmd_is_running():316                return317            self.node.log.debug(318                "Testpmd is not responding to signals, attempt reload of hv_netvsc."319            )320            # if this somehow didn't kill it, reset netvsc321            self.node.tools[Modprobe].reload(["hv_netvsc"])322            if self.check_testpmd_is_running():323                raise LisaException("Testpmd has hung, killing the test.")324            else:325                self.node.log.debug(326                    "Testpmd killed with hv_netvsc reload. "327                    "Proceeding with processing test run results."328                )329    def get_data_from_testpmd_output(330        self,331        search_key_constant: str,332        testpmd_output: str,333    ) -> List[int]:334        # Find all data in the output that matches335        # Apply a list of filters to the data336        # return a single output from a final filter function337        assert_that(testpmd_output).described_as(338            "Could not find output from last testpmd run."339        ).is_not_equal_to("")340        matches = re.findall(341            self._testpmd_output_regex[search_key_constant], testpmd_output342        )343        assert_that(matches).described_as(344            (345                "Could not locate any matches for search key "346                f"{self._testpmd_output_regex[search_key_constant]} "347                "in the test output."348            )349        )350        cast_to_ints = list(map(int, matches))351        cast_to_ints = _discard_first_zeroes(cast_to_ints)352        return _discard_first_and_last_sample(cast_to_ints)353    def populate_performance_data(self) -> None:354        self.rx_pps_data = self.get_data_from_testpmd_output(355            self._rx_pps_key, self._last_run_output356        )357        self.tx_pps_data = self.get_data_from_testpmd_output(358            self._tx_pps_key, self._last_run_output359        )360    def get_mean_rx_pps(self) -> int:361        self._check_pps_data("RX")362        return _mean(self.rx_pps_data)363    def get_mean_tx_pps(self) -> int:364        self._check_pps_data("TX")365        return _mean(self.tx_pps_data)366    def get_max_rx_pps(self) -> int:367        self._check_pps_data("RX")368        return max(self.rx_pps_data)369    def get_max_tx_pps(self) -> int:370        self._check_pps_data("TX")371        return max(self.tx_pps_data)372    def get_min_rx_pps(self) -> int:373        self._check_pps_data("RX")374        return min(self.rx_pps_data)375    def get_min_tx_pps(self) -> int:376        self._check_pps_data("TX")377        return min(self.tx_pps_data)378    def get_mean_tx_pps_sriov_rescind(self) -> Tuple[int, int, int]:379        return self._get_pps_sriov_rescind(self._tx_pps_key)380    def get_mean_rx_pps_sriov_rescind(self) -> Tuple[int, int, int]:381        return self._get_pps_sriov_rescind(self._rx_pps_key)382    def add_sample_apps_to_build_list(self, apps: Union[List[str], None]) -> None:383        if apps:384            self._sample_apps_to_build = apps385        else:386            self._sample_apps_to_build = []387    def __init__(self, *args: Any, **kwargs: Any) -> None:388        super().__init__(*args, **kwargs)389        self._dpdk_source = kwargs.pop("dpdk_source", PACKAGE_MANAGER_SOURCE)390        self._dpdk_branch = kwargs.pop("dpdk_branch", "main")391        self._sample_apps_to_build = kwargs.pop("sample_apps", [])392        self._dpdk_version_info = VersionInfo(0, 0)393        self._testpmd_install_path: str = ""394        self.find_testpmd_binary(assert_on_fail=False)395    def _determine_network_hardware(self) -> None:396        lspci = self.node.tools[Lspci]397        device_list = lspci.get_devices()398        self.is_connect_x3 = any(399            ["ConnectX-3" in dev.device_info for dev in device_list]400        )401    def _check_pps_data_exists(self, rx_or_tx: str) -> None:402        data_attr_name = f"{rx_or_tx.lower()}_pps_data"403        assert_that(hasattr(self, data_attr_name)).described_as(404            (405                f"PPS data ({rx_or_tx}) did not exist for testpmd object. "406                "This indicates either testpmd did not run or the suite is "407                "missing an assert. Contact the test maintainer."408            )409        ).is_true()410    def _check_pps_data(self, rx_or_tx: str) -> None:411        self._check_pps_data_exists(rx_or_tx)412        data_set: List[int] = []413        if rx_or_tx == "RX":414            data_set = self.rx_pps_data415        elif rx_or_tx == "TX":416            data_set = self.tx_pps_data417        else:418            fail(419                "Identifier passed to _check_pps_data was not recognized, "420                f"must be RX or TX. Found {rx_or_tx}"421            )422        assert_that(any(data_set)).described_as(423            f"any({str(data_set)}) resolved to false. Test data was "424            f"empty or all zeroes for dpdktestpmd.{rx_or_tx.lower()}_pps_data."425        ).is_true()426    def _install(self) -> bool:427        self._testpmd_output_after_reenable = ""428        self._testpmd_output_before_rescind = ""429        self._testpmd_output_during_rescind = ""430        self._last_run_output = ""431        self._determine_network_hardware()432        node = self.node433        if isinstance(node.os, Debian):434            repos = node.os.get_repositories()435            backport_repo = f"{node.os.information.codename}-backports"436            if any([backport_repo in repo.name for repo in repos]):437                self._debian_backports_args = [f"-t {backport_repo}"]438            else:439                self._debian_backports_args = []440        self._install_dependencies()441        # installing from distro package manager442        if self.use_package_manager_install():443            self.node.log.info(444                "Installing dpdk and dev package from package manager..."445            )446            if isinstance(node.os, Debian):447                node.os.install_packages(448                    ["dpdk", "dpdk-dev"],449                    extra_args=self._debian_backports_args,450                )451            elif isinstance(node.os, Fedora):452                node.os.install_packages(["dpdk", "dpdk-devel"])453            else:454                raise NotImplementedError(455                    "Dpdk package names are missing in dpdktestpmd.install"456                    f" for os {node.os.name}"457                )458            self._dpdk_version_info = node.os.get_package_information("dpdk")459            self.node.log.info(460                f"Installed DPDK version {str(self._dpdk_version_info)} "461                "from package manager"462            )463            self.find_testpmd_binary()464            self._load_drivers_for_dpdk()465            return True466        # otherwise install from source tarball or git467        self.node.log.info(f"Installing dpdk from source: {self._dpdk_source}")468        self._dpdk_repo_path_name = "dpdk"469        self.dpdk_path = self.node.working_path.joinpath(self._dpdk_repo_path_name)470        if self.find_testpmd_binary(471            assert_on_fail=False, check_path="/usr/local/bin"472        ):  # tools are already installed473            return True474        git_tool = node.tools[Git]475        echo_tool = node.tools[Echo]476        if self._dpdk_source and self._dpdk_source.endswith(".tar.gz"):477            wget_tool = node.tools[Wget]478            tar_tool = node.tools[Tar]479            if self._dpdk_branch:480                node.log.warn(481                    (482                        "DPDK tarball source does not need dpdk_branch defined. "483                        "User-defined variable dpdk_branch will be ignored."484                    )485                )486            working_path = str(node.working_path)487            wget_tool.get(488                self._dpdk_source,489                working_path,490            )491            dpdk_filename = self._dpdk_source.split("/")[-1]492            # extract tar into dpdk/ folder and discard old root folder name493            tar_tool.extract(494                str(node.working_path.joinpath(dpdk_filename)),495                str(self.dpdk_path),496                strip_components=1,497            )498            self.set_version_info_from_source_install(499                self._dpdk_source, self._version_info_from_tarball_regex500            )501        else:502            git_tool.clone(503                self._dpdk_source,504                cwd=node.working_path,505                dir_name=self._dpdk_repo_path_name,506            )507            if not self._dpdk_branch:508                # dpdk stopped using a default branch509                # if a branch is not specified, get latest version tag.510                self._dpdk_branch = git_tool.get_tag(511                    self.dpdk_path, filter=r"^v.*"  # starts w 'v'512                )513            git_tool.checkout(self._dpdk_branch, cwd=self.dpdk_path)514            self.set_version_info_from_source_install(515                self._dpdk_branch, self._version_info_from_git_tag_regex516            )517        self._load_drivers_for_dpdk()518        # add sample apps to compilation if they are present519        if self._sample_apps_to_build:520            sample_apps = f"-Dexamples={','.join(self._sample_apps_to_build)}"521        else:522            sample_apps = ""523        node.execute(524            f"meson {sample_apps} build",525            shell=True,526            cwd=self.dpdk_path,527            expected_exit_code=0,528            expected_exit_code_failure_message=(529                "meson build for dpdk failed, check that"530                "dpdk build has not changed to eliminate the use of meson or "531                "meson version is compatible with this dpdk version and OS."532            ),533        )534        self.dpdk_build_path = self.dpdk_path.joinpath("build")535        node.execute(536            "ninja",537            cwd=self.dpdk_build_path,538            timeout=1800,539            expected_exit_code=0,540            expected_exit_code_failure_message=(541                "ninja build for dpdk failed. check build spew for missing headers "542                "or dependencies. Also check that this ninja version requirement "543                "has not changed for dpdk."544            ),545        )546        node.execute(547            "ninja install",548            cwd=self.dpdk_build_path,549            sudo=True,550            expected_exit_code=0,551            expected_exit_code_failure_message=(552                "ninja install failed for dpdk binaries."553            ),554        )555        node.execute(556            "ldconfig",557            cwd=self.dpdk_build_path,558            sudo=True,559            expected_exit_code=0,560            expected_exit_code_failure_message="ldconfig failed, check for error spew.",561        )562        library_bashrc_lines = [563            "export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}:/usr/local/lib64/pkgconfig/",564            "export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib64/",565        ]566        echo_tool.write_to_file(567            ";".join(library_bashrc_lines),568            node.get_pure_path("~/.bashrc"),569            append=True,570        )571        self.find_testpmd_binary(check_path="/usr/local/bin")572        return True573    def _load_drivers_for_dpdk(self) -> None:574        self.node.log.info("Loading drivers for infiniband, rdma, and mellanox hw...")575        if self.is_connect_x3:576            mellanox_drivers = ["mlx4_core", "mlx4_ib"]577        else:578            mellanox_drivers = ["mlx5_core", "mlx5_ib"]579        modprobe = self.node.tools[Modprobe]580        if isinstance(self.node.os, Ubuntu):581            # Ubuntu shouldn't need any special casing, skip to loading rdma/ib582            pass583        elif isinstance(self.node.os, Debian):584            # NOTE: debian buster doesn't include rdma and ib drivers585            # on 5.4 specifically for linux-image-cloud:586            # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1012639587            # for backports on this release we should update the kernel to latest588            kernel_info = self.node.os.get_kernel_information(force_run=True)589            # update to at least 5.10 (known good for buster linux-image-cloud-(arch))590            if (591                self.node.os.information.codename == "buster"592                and kernel_info.version <= "5.10.0"593            ):594                self.node.log.debug(595                    f"Debian (buster) kernel version found: {str(kernel_info.version)} "596                    "Updating linux-image-cloud to most recent kernel."597                )598                # grab the linux-image package name from kernel version metadata599                linux_image_package = "linux-image-cloud-[a-zA-Z0-9]*"600                self.node.os.install_packages([linux_image_package])601                self.node.reboot()602        elif isinstance(self.node.os, Fedora):603            if not self.is_connect_x3:604                self.node.execute(605                    f"dracut --add-drivers '{' '.join(mellanox_drivers)} ib_uverbs' -f",606                    cwd=self.node.working_path,607                    expected_exit_code=0,608                    expected_exit_code_failure_message=(609                        "Issue loading mlx and ib_uverb drivers into ramdisk."610                    ),611                    sudo=True,612                )613        else:614            raise UnsupportedDistroException(self.node.os)615        rmda_drivers = ["ib_core", "ib_uverbs", "rdma_ucm"]616        # some versions of dpdk require these two, some don't.617        # some systems have them, some don't. Load if they're there.618        for module in ["ib_ipoib", "ib_umad"]:619            if modprobe.module_exists(module):620                rmda_drivers.append(module)621        modprobe.load(rmda_drivers)622        modprobe.load(mellanox_drivers)623    def _install_dependencies(self) -> None:624        node = self.node625        if isinstance(node.os, Ubuntu):626            self._install_ubuntu_dependencies()627        elif isinstance(node.os, Debian):628            node.os.install_packages(629                self._debian_packages, extra_args=self._debian_backports_args630            )631        elif isinstance(node.os, Fedora):632            self._install_fedora_dependencies()633        else:634            raise UnsupportedDistroException(635                node.os, "This OS does not have dpdk installation implemented yet."636            )637    def _install_ubuntu_dependencies(self) -> None:638        node = self.node639        ubuntu = node.os640        if not isinstance(ubuntu, Ubuntu):641            fail(642                "_install_ubuntu_dependencies was called on node "643                f"which was not Ubuntu: {node.os.information.full_version}"644            )645            return  # appease the type checker646        if ubuntu.information.version < "18.4.0":647            raise SkippedException(648                f"Ubuntu {str(ubuntu.information.version)} is not supported. "649                "Minimum documented version for DPDK support is >=18.04"650            )651        elif ubuntu.information.version < "20.4.0":652            ubuntu.install_packages(653                self._ubuntu_packages_1804,654                extra_args=self._debian_backports_args,655            )656            if not self.use_package_manager_install():657                self._install_ninja_and_meson()658        else:659            ubuntu.install_packages(660                self._ubuntu_packages_2004,661                extra_args=self._debian_backports_args,662            )663    def _install_fedora_dependencies(self) -> None:664        node = self.node665        rhel = node.os666        if not isinstance(rhel, Fedora):667            fail(668                "_install_fedora_dependencies was called on node "669                f"which was not Fedora: {node.os.information.full_version}"670            )671            return  # appease the type checker672        # DPDK is very sensitive to rdma-core/kernel mismatches673        # update to latest kernel before instaling dependencies674        rhel.install_packages("kernel")675        node.reboot()676        if rhel.information.version.major == 7:677            # Add packages for rhel7678            rhel.install_packages(["libmnl-devel", "libbpf-devel"])679        try:680            rhel.install_packages("kernel-devel-$(uname -r)")681        except MissingPackagesException:682            node.log.debug("kernel-devel-$(uname -r) not found. Trying kernel-devel")683            rhel.install_packages("kernel-devel")684        # RHEL 8 doesn't require special cases for installed packages.685        # TODO: RHEL9 may require updates upon release686        rhel.group_install_packages("Development Tools")687        rhel.group_install_packages("Infiniband Support")688        rhel.install_packages(self._fedora_packages)689        # ensure RDMA service is started if present.690        service_name = "rdma"691        service = node.tools[Service]692        if service.check_service_exists(service_name):693            if not service.check_service_status(service_name):694                service.enable_service(service_name)695            # some versions of RHEL and CentOS have service.rdma696            # that will refuse manual start/stop and will return697            # NOPERMISSION. This is not fatal and can be continued.698            # If the service is present it should start when needed.699            service.restart_service(700                service_name, ignore_exit_code=service.SYSTEMD_EXIT_NOPERMISSION701            )702        if not self.use_package_manager_install():703            self._install_ninja_and_meson()704    def _install_ninja_and_meson(self) -> None:705        node = self.node706        cwd = node.working_path707        node.execute(708            "pip3 install --upgrade meson",709            cwd=cwd,710            sudo=True,711            expected_exit_code=0,712            expected_exit_code_failure_message=(713                "Failed to update Meson to latest version with pip3"714            ),715        )716        if node.shell.exists(node.get_pure_path("/usr/bin/meson")):...Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 minutes of automation test minutes FREE!!
