How to use the set_version_info_from_source_install method in LISA

Best Python code snippet using lisa_python

dpdktestpmd.py

Source:dpdktestpmd.py Github

copy

Full Screen

...140 if self._dpdk_source == PACKAGE_MANAGER_SOURCE:141 return True142 else:143 return False144 def set_version_info_from_source_install(145 self, branch_identifier: str, matcher: Pattern[str]146 ) -> None:147 match = matcher.search(branch_identifier)148 if not match or not match.group("major") or not match.group("minor"):149 fail(150 f"Could not determine dpdk version info from '{self._dpdk_source}'"151 f" with id: '{branch_identifier}' using regex: '{matcher.pattern}'"152 )153 else:154 major, minor = map(int, [match.group("major"), match.group("minor")])155 self._dpdk_version_info: VersionInfo = VersionInfo(major, minor)156 def generate_testpmd_include(157 self, node_nic: NicInfo, vdev_id: int, use_max_nics: bool = False158 ) -> str:159 # handle generating different flags for pmds/device combos for testpmd160 # identify which nics to inlude in test, exclude others161 # TODO: Test does not make full use of multiple nics yet.162 if use_max_nics:163 self.node.log.warn(164 "NOTE: Testpmd suite does not yet make full use of multiple nics"165 )166 # if use_max_nics:167 # ssh_nic = self.node.nics.get_nic(self.node.nics.default_nic)168 # include_nics = [169 # self.node.nics.get_nic(nic)170 # for nic in self.node.nics.get_upper_nics()171 # if nic != ssh_nic.upper172 # ]173 # exclude_nics = [ssh_nic]174 # else:175 include_nics = [node_nic]176 exclude_nics = [177 self.node.nics.get_nic(nic)178 for nic in self.node.nics.get_upper_nics()179 if nic != node_nic.upper180 ]181 # build list of vdev info flags for each nic182 vdev_info = ""183 self.node.log.info(f"Running test with {len(include_nics)} nics.")184 for nic in include_nics:185 if self._dpdk_version_info and self._dpdk_version_info >= "18.11.0":186 vdev_name = "net_vdev_netvsc"187 vdev_flags = f"iface={nic.upper},force=1"188 else:189 vdev_name = "net_failsafe"190 vdev_flags = (191 f"dev({nic.pci_slot}),dev(net_tap0,iface={nic.upper},force=1)"192 )193 if nic.bound_driver == "hv_netvsc":194 vdev_info += 
f'--vdev="{vdev_name}{vdev_id},{vdev_flags}" '195 elif nic.bound_driver == "uio_hv_generic":196 pass197 else:198 fail(199 (200 f"Unknown driver({nic.bound_driver}) bound to "201 f"{nic.upper}/{nic.lower}."202 "Cannot generate testpmd include arguments."203 )204 )205 # exclude pci slots not associated with the test nic206 exclude_flags = ""207 for nic in exclude_nics:208 exclude_flags += f' -b "{nic.pci_slot}"'209 return vdev_info + exclude_flags210 def _calculate_core_count(211 self,212 cores_available: int,213 txq: int,214 rxq: int,215 use_max_nics: bool,216 service_cores: int = 1,217 ) -> int:218 # Use either:219 # - as many cores as are available, minus a core for the system220 # - 1 per queue on each nic + one per NIC PMD221 # this is a no-op for now,222 # test does not correctly handle multiple nics yet223 if use_max_nics:224 pass225 nics_available = 1226 return min(227 cores_available - 1,228 (nics_available * (txq + rxq)) + (nics_available * service_cores),229 )230 def generate_testpmd_command(231 self,232 nic_to_include: NicInfo,233 vdev_id: int,234 mode: str,235 pmd: str,236 extra_args: str = "",237 txq: int = 0,238 rxq: int = 0,239 service_cores: int = 1,240 use_max_nics: bool = False,241 ) -> str:242 # testpmd \243 # -l <core-list> \244 # -n <num of mem channels> \245 # -w <pci address of the device you plan to use> \246 # --vdev="net_vdev_netvsc<id>,iface=<the iface to attach to>" \247 # -- --port-topology=chained \248 # --nb-cores <number of cores to use for test pmd> \249 # --forward-mode=txonly \250 # --eth-peer=<port id>,<receiver peer MAC address> \251 # --stats-period <display interval in seconds>252 # if test asks for multicore, it implies using more than one nic253 # otherwise default core count for single nic will be used254 # and then adjusted for queue count255 if not (rxq or txq):256 txq = 1257 rxq = 1258 # calculate the available cores per numa node, infer the offset259 # required for core selection argument260 cores_available = 
self.node.tools[Lscpu].get_core_count()261 numa_node_count = self.node.tools[Lscpu].get_numa_node_count()262 nic_numa_node = self.node.nics._get_nic_numa_node(nic_to_include.lower)263 cores_per_numa = cores_available // numa_node_count264 numa_core_offset = cores_per_numa * nic_numa_node265 # calculate how many cores to use based on txq/rxq per nic and how many nics266 use_core_count = self._calculate_core_count(267 cores_per_numa, txq, rxq, use_max_nics, service_cores=service_cores268 )269 nic_include_info = self.generate_testpmd_include(270 nic_to_include, vdev_id, use_max_nics271 )272 # set up queue arguments273 if txq or rxq:274 # set number of queues to use for txq and rxq (per nic, default is 1)275 assert_that(txq).described_as(276 "TX queue value must be greater than 0 if txq is used"277 ).is_greater_than(0)278 assert_that(rxq).described_as(279 "RX queue value must be greater than 0 if rxq is used"280 ).is_greater_than(0)281 extra_args += f" --txq={txq} --rxq={rxq} "282 assert_that(use_core_count).described_as(283 "Selection asked for more cores than were available for numa "284 f"{nic_numa_node}. 
Requested {use_core_count}"285 ).is_less_than_or_equal_to(cores_per_numa)286 # use the selected amount of cores, adjusting for 0 index.287 core_args = f"-l {numa_core_offset}-{numa_core_offset + use_core_count-1}"288 return (289 f"{self._testpmd_install_path} {core_args} -n 4 --proc-type=primary "290 f"{nic_include_info} -- --forward-mode={mode} {extra_args} "291 "-a --stats-period 2 --port-topology=chained"292 )293 def run_for_n_seconds(self, cmd: str, timeout: int) -> str:294 self._last_run_timeout = timeout295 self.node.log.info(f"{self.node.name} running: {cmd}")296 proc_result = self.node.tools[Timeout].run_with_timeout(297 cmd, timeout, SIGINT, kill_timeout=timeout + 10298 )299 self._last_run_output = proc_result.stdout300 self.populate_performance_data()301 return proc_result.stdout302 def check_testpmd_is_running(self) -> bool:303 pids = self.node.tools[Pidof].get_pids(self.command, sudo=True)304 return len(pids) > 0305 def kill_previous_testpmd_command(self) -> None:306 # kill testpmd early307 self.node.tools[Kill].by_name(self.command, ignore_not_exist=True)308 if self.check_testpmd_is_running():309 self.node.log.debug(310 "Testpmd is not responding to signals, "311 "attempt network connection reset."312 )313 # reset node connections (quicker and less risky than netvsc reset)314 self.node.close()315 if not self.check_testpmd_is_running():316 return317 self.node.log.debug(318 "Testpmd is not responding to signals, attempt reload of hv_netvsc."319 )320 # if this somehow didn't kill it, reset netvsc321 self.node.tools[Modprobe].reload(["hv_netvsc"])322 if self.check_testpmd_is_running():323 raise LisaException("Testpmd has hung, killing the test.")324 else:325 self.node.log.debug(326 "Testpmd killed with hv_netvsc reload. 
"327 "Proceeding with processing test run results."328 )329 def get_data_from_testpmd_output(330 self,331 search_key_constant: str,332 testpmd_output: str,333 ) -> List[int]:334 # Find all data in the output that matches335 # Apply a list of filters to the data336 # return a single output from a final filter function337 assert_that(testpmd_output).described_as(338 "Could not find output from last testpmd run."339 ).is_not_equal_to("")340 matches = re.findall(341 self._testpmd_output_regex[search_key_constant], testpmd_output342 )343 assert_that(matches).described_as(344 (345 "Could not locate any matches for search key "346 f"{self._testpmd_output_regex[search_key_constant]} "347 "in the test output."348 )349 )350 cast_to_ints = list(map(int, matches))351 cast_to_ints = _discard_first_zeroes(cast_to_ints)352 return _discard_first_and_last_sample(cast_to_ints)353 def populate_performance_data(self) -> None:354 self.rx_pps_data = self.get_data_from_testpmd_output(355 self._rx_pps_key, self._last_run_output356 )357 self.tx_pps_data = self.get_data_from_testpmd_output(358 self._tx_pps_key, self._last_run_output359 )360 def get_mean_rx_pps(self) -> int:361 self._check_pps_data("RX")362 return _mean(self.rx_pps_data)363 def get_mean_tx_pps(self) -> int:364 self._check_pps_data("TX")365 return _mean(self.tx_pps_data)366 def get_max_rx_pps(self) -> int:367 self._check_pps_data("RX")368 return max(self.rx_pps_data)369 def get_max_tx_pps(self) -> int:370 self._check_pps_data("TX")371 return max(self.tx_pps_data)372 def get_min_rx_pps(self) -> int:373 self._check_pps_data("RX")374 return min(self.rx_pps_data)375 def get_min_tx_pps(self) -> int:376 self._check_pps_data("TX")377 return min(self.tx_pps_data)378 def get_mean_tx_pps_sriov_rescind(self) -> Tuple[int, int, int]:379 return self._get_pps_sriov_rescind(self._tx_pps_key)380 def get_mean_rx_pps_sriov_rescind(self) -> Tuple[int, int, int]:381 return self._get_pps_sriov_rescind(self._rx_pps_key)382 def 
add_sample_apps_to_build_list(self, apps: Union[List[str], None]) -> None:383 if apps:384 self._sample_apps_to_build = apps385 else:386 self._sample_apps_to_build = []387 def __init__(self, *args: Any, **kwargs: Any) -> None:388 super().__init__(*args, **kwargs)389 self._dpdk_source = kwargs.pop("dpdk_source", PACKAGE_MANAGER_SOURCE)390 self._dpdk_branch = kwargs.pop("dpdk_branch", "main")391 self._sample_apps_to_build = kwargs.pop("sample_apps", [])392 self._dpdk_version_info = VersionInfo(0, 0)393 self._testpmd_install_path: str = ""394 self.find_testpmd_binary(assert_on_fail=False)395 def _determine_network_hardware(self) -> None:396 lspci = self.node.tools[Lspci]397 device_list = lspci.get_devices()398 self.is_connect_x3 = any(399 ["ConnectX-3" in dev.device_info for dev in device_list]400 )401 def _check_pps_data_exists(self, rx_or_tx: str) -> None:402 data_attr_name = f"{rx_or_tx.lower()}_pps_data"403 assert_that(hasattr(self, data_attr_name)).described_as(404 (405 f"PPS data ({rx_or_tx}) did not exist for testpmd object. "406 "This indicates either testpmd did not run or the suite is "407 "missing an assert. Contact the test maintainer."408 )409 ).is_true()410 def _check_pps_data(self, rx_or_tx: str) -> None:411 self._check_pps_data_exists(rx_or_tx)412 data_set: List[int] = []413 if rx_or_tx == "RX":414 data_set = self.rx_pps_data415 elif rx_or_tx == "TX":416 data_set = self.tx_pps_data417 else:418 fail(419 "Identifier passed to _check_pps_data was not recognized, "420 f"must be RX or TX. Found {rx_or_tx}"421 )422 assert_that(any(data_set)).described_as(423 f"any({str(data_set)}) resolved to false. 
Test data was "424 f"empty or all zeroes for dpdktestpmd.{rx_or_tx.lower()}_pps_data."425 ).is_true()426 def _install(self) -> bool:427 self._testpmd_output_after_reenable = ""428 self._testpmd_output_before_rescind = ""429 self._testpmd_output_during_rescind = ""430 self._last_run_output = ""431 self._determine_network_hardware()432 node = self.node433 if isinstance(node.os, Debian):434 repos = node.os.get_repositories()435 backport_repo = f"{node.os.information.codename}-backports"436 if any([backport_repo in repo.name for repo in repos]):437 self._debian_backports_args = [f"-t {backport_repo}"]438 else:439 self._debian_backports_args = []440 self._install_dependencies()441 # installing from distro package manager442 if self.use_package_manager_install():443 self.node.log.info(444 "Installing dpdk and dev package from package manager..."445 )446 if isinstance(node.os, Debian):447 node.os.install_packages(448 ["dpdk", "dpdk-dev"],449 extra_args=self._debian_backports_args,450 )451 elif isinstance(node.os, Fedora):452 node.os.install_packages(["dpdk", "dpdk-devel"])453 else:454 raise NotImplementedError(455 "Dpdk package names are missing in dpdktestpmd.install"456 f" for os {node.os.name}"457 )458 self._dpdk_version_info = node.os.get_package_information("dpdk")459 self.node.log.info(460 f"Installed DPDK version {str(self._dpdk_version_info)} "461 "from package manager"462 )463 self.find_testpmd_binary()464 self._load_drivers_for_dpdk()465 return True466 # otherwise install from source tarball or git467 self.node.log.info(f"Installing dpdk from source: {self._dpdk_source}")468 self._dpdk_repo_path_name = "dpdk"469 self.dpdk_path = self.node.working_path.joinpath(self._dpdk_repo_path_name)470 if self.find_testpmd_binary(471 assert_on_fail=False, check_path="/usr/local/bin"472 ): # tools are already installed473 return True474 git_tool = node.tools[Git]475 echo_tool = node.tools[Echo]476 if self._dpdk_source and self._dpdk_source.endswith(".tar.gz"):477 wget_tool = 
node.tools[Wget]478 tar_tool = node.tools[Tar]479 if self._dpdk_branch:480 node.log.warn(481 (482 "DPDK tarball source does not need dpdk_branch defined. "483 "User-defined variable dpdk_branch will be ignored."484 )485 )486 working_path = str(node.working_path)487 wget_tool.get(488 self._dpdk_source,489 working_path,490 )491 dpdk_filename = self._dpdk_source.split("/")[-1]492 # extract tar into dpdk/ folder and discard old root folder name493 tar_tool.extract(494 str(node.working_path.joinpath(dpdk_filename)),495 str(self.dpdk_path),496 strip_components=1,497 )498 self.set_version_info_from_source_install(499 self._dpdk_source, self._version_info_from_tarball_regex500 )501 else:502 git_tool.clone(503 self._dpdk_source,504 cwd=node.working_path,505 dir_name=self._dpdk_repo_path_name,506 )507 if not self._dpdk_branch:508 # dpdk stopped using a default branch509 # if a branch is not specified, get latest version tag.510 self._dpdk_branch = git_tool.get_tag(511 self.dpdk_path, filter=r"^v.*" # starts w 'v'512 )513 git_tool.checkout(self._dpdk_branch, cwd=self.dpdk_path)514 self.set_version_info_from_source_install(515 self._dpdk_branch, self._version_info_from_git_tag_regex516 )517 self._load_drivers_for_dpdk()518 # add sample apps to compilation if they are present519 if self._sample_apps_to_build:520 sample_apps = f"-Dexamples={','.join(self._sample_apps_to_build)}"521 else:522 sample_apps = ""523 node.execute(524 f"meson {sample_apps} build",525 shell=True,526 cwd=self.dpdk_path,527 expected_exit_code=0,528 expected_exit_code_failure_message=(...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful