How to use the del_port method in autotest

Best Python code snippet using autotest_python

test_pmd_rebal.py

Source:test_pmd_rebal.py Github

copy

Full Screen

# NOTE(review): the fragment below (original lines 173-206) is the interior
# of a TestCase class whose header lies before this chunk, so it cannot be
# emitted as valid top-level code here.  It is preserved commented-out;
# re-attach it to its enclosing class when merging with the full file.
#
#         self.assertEqual(n_reb_rxq, -1)
#         # 2. check pmd load
#         self.assertEqual(dataif.pmd_load(pmd), 96.0)
#         # del port object from pmd.
#         pmd.del_port(port_name)
#
#     # Test case:
#     # With one pmd thread handling a few single-queue ports, check whether
#     # rebalance is skipped.
#     def test_many_rxq(self):
#         # retrieve pmd object.
#         pmd = self.pmd_map[self.core_id]
#         # a few dummy ports are required for this test.
#         for port_name in ('virtport1', 'virtport2', 'virtport3'):
#             # create port class of name 'virtport'.
#             dataif.make_dataif_port(port_name)
#             # add port object into pmd.
#             fx_port = pmd.add_port(port_name)
#             fx_port.numa_id = pmd.numa_id
#             # add a dummy rxq into port.
#             fx_rxq = fx_port.add_rxq(0)
#             # add some cpu consumption for this rxq.
#             for i in range(0, config.ncd_samples_max):
#                 fx_rxq.cpu_cyc[i] = 32
#                 fx_rxq.rx_cyc[i] = 32
#         # test dryrun
#         n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
#         # validate results
#         # 1. no rxqs be rebalanced.
#         self.assertEqual(n_reb_rxq, -1)
#         # 2. check pmd load
#         self.assertEqual(dataif.pmd_load(pmd), 96.0)
#         # del port object from pmd.
#         for port_name in ('virtport1', 'virtport2', 'virtport3'):
#             pmd.del_port(port_name)


# Fixture:
# create two pmd thread objects where in, each has one single-queued port.
def fx_2pmd_each_1rxq(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    pmd2 = testobj.pmd_map[testobj.core2_id]
    # one dummy port is required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    # add port object into pmd.
    fx_port1 = pmd1.add_port(port1_name)
    fx_port1.numa_id = pmd1.numa_id
    fx_port2 = pmd2.add_port(port2_name)
    fx_port2.numa_id = pmd2.numa_id
    # add a dummy rxq into port.
    fx_p1rxq = fx_port1.add_rxq(0)
    fx_p1rxq.pmd = pmd1
    fx_p2rxq = fx_port2.add_rxq(0)
    fx_p2rxq.pmd = pmd2
    # add some cpu consumption for these rxqs.
    for i in range(0, config.ncd_samples_max):
        fx_p1rxq.cpu_cyc[i] = 96
        fx_p1rxq.rx_cyc[i] = 96
        fx_p2rxq.cpu_cyc[i] = 90
        fx_p2rxq.rx_cyc[i] = 90


# Fixture:
# Create two pmd thread objects where in, one pmd has two single-queued
# ports, while the other is idle (without any port/rxq).
def fx_2pmd_one_empty(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    # one dummy port is required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    # add port object into pmd.
    fx_port1 = pmd1.add_port(port1_name)
    fx_port1.numa_id = pmd1.numa_id
    # add second port as well into pmd 1 for imbalance.
    fx_port2 = pmd1.add_port(port2_name)
    fx_port2.numa_id = pmd1.numa_id
    # add a dummy rxq into port.
    fx_p1rxq = fx_port1.add_rxq(0)
    fx_p1rxq.pmd = pmd1
    fx_p2rxq = fx_port2.add_rxq(0)
    fx_p2rxq.pmd = pmd1
    # add some cpu consumption for these rxqs.
    for i in range(0, config.ncd_samples_max):
        fx_p1rxq.cpu_cyc[i] = 6
        fx_p1rxq.rx_cyc[i] = 6
        fx_p2rxq.cpu_cyc[i] = 90
        fx_p2rxq.rx_cyc[i] = 90


# Fixture:
# Create two pmd thread objects where in, each pmd has two single-queued
# ports.
def fx_2pmd_each_2rxq(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    pmd2 = testobj.pmd_map[testobj.core2_id]
    # one dummy port is required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    port3_name = 'virtport3'
    port4_name = 'virtport4'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    dataif.make_dataif_port(port3_name)
    dataif.make_dataif_port(port4_name)
    # add port object into pmd.
    fx_port1 = pmd1.add_port(port1_name)
    fx_port1.numa_id = pmd1.numa_id
    fx_port2 = pmd2.add_port(port2_name)
    fx_port2.numa_id = pmd2.numa_id
    fx_port3 = pmd1.add_port(port3_name)
    fx_port3.numa_id = pmd1.numa_id
    fx_port4 = pmd2.add_port(port4_name)
    fx_port4.numa_id = pmd2.numa_id
    # add a dummy rxq into port.
    fx_p1rxq = fx_port1.add_rxq(0)
    fx_p1rxq.pmd = pmd1
    fx_p2rxq = fx_port2.add_rxq(0)
    fx_p2rxq.pmd = pmd2
    fx_p3rxq = fx_port3.add_rxq(0)
    fx_p3rxq.pmd = pmd1
    fx_p4rxq = fx_port4.add_rxq(0)
    fx_p4rxq.pmd = pmd2
    # add some cpu consumption for these rxqs.
    # order of rxqs based on cpu consumption: rxqp1,rxqp2,rxqp3,rxqp4
    for i in range(0, config.ncd_samples_max):
        fx_p1rxq.cpu_cyc[i] = 70
        fx_p1rxq.rx_cyc[i] = 70
        fx_p2rxq.cpu_cyc[i] = 65
        fx_p2rxq.rx_cyc[i] = 65
        fx_p3rxq.cpu_cyc[i] = 26
        fx_p3rxq.rx_cyc[i] = 26
        fx_p4rxq.cpu_cyc[i] = 25
        fx_p4rxq.rx_cyc[i] = 25


# Fixture:
# Create two pmd thread objects where in, two queued ports split
# among pmds.
def fx_2pmd_each_1p2rxq(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    pmd2 = testobj.pmd_map[testobj.core2_id]
    # dummy ports required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    # add port object into pmd.
    fx_port11 = pmd1.add_port(port1_name)
    fx_port11.numa_id = pmd1.numa_id
    fx_port22 = pmd2.add_port(port2_name)
    fx_port22.numa_id = pmd2.numa_id
    fx_port21 = pmd1.add_port(port2_name)
    fx_port21.numa_id = pmd1.numa_id
    fx_port12 = pmd2.add_port(port1_name)
    fx_port12.numa_id = pmd2.numa_id
    # add a dummy rxq into port.
    fx_p1rxq1 = fx_port11.add_rxq(0)
    fx_p1rxq1.pmd = pmd1
    fx_p2rxq2 = fx_port22.add_rxq(1)
    fx_p2rxq2.pmd = pmd2
    fx_p2rxq1 = fx_port21.add_rxq(0)
    fx_p2rxq1.pmd = pmd1
    fx_p1rxq2 = fx_port12.add_rxq(1)
    fx_p1rxq2.pmd = pmd2
    # add some cpu consumption for these rxqs.
    # order of rxqs based on cpu consumption: rxq1p1,rxq2p2,rxq1p2,rxq2p1
    for i in range(0, config.ncd_samples_max):
        fx_p1rxq1.cpu_cyc[i] = 90
        fx_p1rxq1.rx_cyc[i] = 90
        fx_p2rxq2.cpu_cyc[i] = 60
        fx_p2rxq2.rx_cyc[i] = 60
        fx_p1rxq2.cpu_cyc[i] = 30
        fx_p1rxq2.rx_cyc[i] = 30
        fx_p2rxq1.cpu_cyc[i] = 6
        fx_p2rxq1.rx_cyc[i] = 6


class TestRebalDryrun_TwoPmd(TestCase):
    """
    Test rebalance for one or more rxq handled by two pmds.
    """

    rebalance_dryrun = dataif.rebalance_dryrun_by_cyc
    pmd_map = dict()
    core1_id = 0
    core2_id = 1

    # setup test environment
    def setUp(self):
        util.Memoize.forgot = True
        # turn off limited info shown in assert failure for pmd object.
        self.maxDiff = None
        dataif.Context.nlog = NlogNoop()
        # create one pmd object.
        fx_pmd1 = dataif.Dataif_Pmd(self.core1_id)
        fx_pmd2 = dataif.Dataif_Pmd(self.core2_id)
        # let it be in numa 0.
        fx_pmd1.numa_id = 0
        fx_pmd2.numa_id = 0
        # add some cpu consumption for these pmds.
        for i in range(0, config.ncd_samples_max):
            fx_pmd1.idle_cpu_cyc[i] = (4 * (i + 1))
            fx_pmd1.proc_cpu_cyc[i] = (96 * (i + 1))
            fx_pmd1.rx_cyc[i] = (96 * (i + 1))
        fx_pmd1.cyc_idx = config.ncd_samples_max - 1
        for i in range(0, config.ncd_samples_max):
            fx_pmd2.idle_cpu_cyc[i] = (10 * (i + 1))
            fx_pmd2.proc_cpu_cyc[i] = (90 * (i + 1))
            fx_pmd2.rx_cyc[i] = (90 * (i + 1))
        fx_pmd2.cyc_idx = config.ncd_samples_max - 1
        self.pmd_map[self.core1_id] = fx_pmd1
        self.pmd_map[self.core2_id] = fx_pmd2
        return

    # Test case:
    # With two threads from same numa, each handling only one single-queued
    # port, check whether rebalance is skipped.
    def test_one_rxq_lnuma(self):
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_1rxq(self)
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. no rxq should be rebalanced.
        self.assertEqual(n_reb_rxq, -1, "no rebalance expected")
        # 2. each pmd is not updated.
        self.assertEqual(pmd_map[self.core1_id], pmd1)
        self.assertEqual(pmd_map[self.core2_id], pmd2)
        # 3. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 96.0)
        self.assertEqual(dataif.pmd_load(pmd2), 90.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')

    # Test case:
    # With two threads from same numa, where one pmd thread is handling
    # two single-queued ports, while the other pmd is empty,
    # check whether rebalance is performed.
    # Scope is to check if only one rxq is moved to empty pmd.
    #
    # order of rxqs based on cpu consumption: rxqp2,rxqp1
    # order of pmds for rebalance dryrun: pmd1,pmd2
    #
    # 1. rxqp2(pmd1) -NOREB-> rxqp2(pmd1)
    #    rxqp1(pmd1)
    #    -     (pmd2)
    #
    # 2. rxqp2(pmd1) -NOREB-> rxqp2(pmd1)
    #    rxqp1(pmd1) --+--+-> rxqp1(reb_pmd2)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_two_1rxq_with_empty_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # let pmd2 be idle
        for i in range(0, config.ncd_samples_max):
            pmd2.idle_cpu_cyc[i] = (100 * (i + 1))
            pmd2.proc_cpu_cyc[i] = (0 * (i + 1))
            pmd2.rx_cyc[i] = (0 * (i + 1))
        # create rxq
        fx_2pmd_one_empty(self)
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. one rxq to be rebalanced.
        self.assertEqual(n_reb_rxq, 1, "one rxq to be rebalanced")
        # 2. each pmd is updated.
        self.assertNotEqual(pmd_map[self.core1_id], pmd1)
        self.assertNotEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd1.find_port_by_name('virtport2')
        # 3.a rxqp2 remains in pmd1
        self.assertEqual(port2.rxq_rebalanced, {})
        self.assertEqual(port2.find_rxq_by_id(0).pmd.id, pmd1.id)
        # 3.b rxqp1 moves into pmd2
        self.assertEqual(port1.rxq_rebalanced[0], pmd2.id)
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 90.0)
        self.assertEqual(dataif.pmd_load(pmd2), 6.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd1.del_port('virtport2')

    # Test case:
    # With two threads from same numa, where each pmd thread is handling
    # one queue from two-queued ports. check whether rebalance is performed.
    # Scope is to check if rxq affinity is retained.
    #
    # order of rxqs based on cpu consumption: rxq1p1,rxq2p2,rxq2p1,rxq1p2
    # order of pmds for rebalance dryrun: pmd1,pmd2,pmd2,pmd1
    #
    # 1. rxq1p1(pmd1) 90% -NOREB-> rxq1p1(pmd1)
    #    rxq1p2(pmd1)  6% -NOREB-> rxq1p2(pmd1)
    #    rxq2p2(pmd2) 60% -NOREB-> rxq2p2(pmd2)
    #    rxq2p1(pmd2) 30% -NOREB-> rxq2p1(pmd1)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_two_1p2rxq_lnuma_norb(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_1p2rxq(self)
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. no rxq should be rebalanced.
        self.assertEqual(n_reb_rxq, 0, "no rebalance expected")
        # 2. each pmd is not updated.
        self.assertEqual(pmd_map[self.core1_id], pmd1)
        self.assertEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        port11 = pmd1.find_port_by_name('virtport1')
        port12 = pmd2.find_port_by_name('virtport1')
        port21 = pmd1.find_port_by_name('virtport2')
        port22 = pmd2.find_port_by_name('virtport2')
        self.assertEqual(port11.rxq_rebalanced, {})
        self.assertEqual(port22.rxq_rebalanced, {})
        self.assertEqual(port21.rxq_rebalanced, {})
        self.assertEqual(port12.rxq_rebalanced, {})
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 96.0)
        self.assertEqual(dataif.pmd_load(pmd2), 90.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')

    # Test case:
    # With two threads from same numa, where each pmd thread is handling
    # one queue from two-queued ports. check whether rebalance is performed.
    # Scope is to check if rebalancing is not done on pmd that already
    # has same port but different rxq.
    # ( For now rebalance is allowed by switch, so follow)
    #
    # order of rxqs based on cpu consumption: rxq2p2,rxq1p1,rxq1p3,
    #                                         rxq1p2,rxq2p1
    # order of pmds for rebalance dryrun: pmd2,pmd1,pmd1,pmd2,pmd2
    #
    # 1. rxq1p1(pmd1) 66% -NOREB-> rxq1p1(pmd1)
    #    rxq1p3(pmd1) 22%
    #    rxq1p2(pmd1)  8%
    #    rxq2p2(pmd2) 86% -NOREB-> rxq2p2(pmd2)
    #    rxq2p1(pmd2)  4%
    #
    # 2. rxq2p2(pmd2) 86% -NOREB-> rxq2p2(pmd2)
    #    rxq1p1(pmd1) 66% -NOREB-> rxq1p1(pmd1)
    #    rxq1p3(pmd1) 22% -NOREB-> rxq1p3(pmd1)
    #    rxq1p2(pmd1)  8% --+--+-> rxq1p2(reb_pmd2)
    #    rxq2p1(pmd2)  4% -NOREB-> rxq2p1(pmd2)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_two_1p2rxq_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_1p2rxq(self)
        # we need an extra port to break retaining some ports.
        dataif.make_dataif_port('virtport3')
        port31 = pmd1.add_port('virtport3')
        port31.numa_id = pmd1.numa_id
        p3rxq1 = port31.add_rxq(0)
        p3rxq1.pmd = pmd1
        # update some cpu consumption for these rxqs.
        # order of rxqs based on cpu consumption:
        #   rxq2p2,rxq1p1,rxq1p3,rxq1p2,rxq2p1
        port11 = pmd1.find_port_by_name('virtport1')
        port12 = pmd2.find_port_by_name('virtport1')
        port21 = pmd1.find_port_by_name('virtport2')
        port22 = pmd2.find_port_by_name('virtport2')
        p1rxq1 = port11.find_rxq_by_id(0)
        p1rxq2 = port12.find_rxq_by_id(1)
        p2rxq1 = port21.find_rxq_by_id(0)
        p2rxq2 = port22.find_rxq_by_id(1)
        for i in range(0, config.ncd_samples_max):
            p2rxq2.cpu_cyc[i] = 86
            p1rxq1.cpu_cyc[i] = 66
            p3rxq1.cpu_cyc[i] = 22
            p2rxq1.cpu_cyc[i] = 8
            p1rxq2.cpu_cyc[i] = 4
            p2rxq2.rx_cyc[i] = 86
            p1rxq1.rx_cyc[i] = 66
            p3rxq1.rx_cyc[i] = 22
            p2rxq1.rx_cyc[i] = 8
            p1rxq2.rx_cyc[i] = 4
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. one rxq to be rebalanced.
        self.assertEqual(n_reb_rxq, 1, "one rxq to be rebalanced")
        # 2. each pmd is updated.
        self.assertNotEqual(pmd_map[self.core1_id], pmd1)
        self.assertNotEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        self.assertEqual(port11.rxq_rebalanced, {})
        self.assertEqual(port22.rxq_rebalanced, {})
        self.assertEqual(port31.rxq_rebalanced, {})
        self.assertEqual(port21.rxq_rebalanced[0], pmd2.id)
        self.assertEqual(port12.rxq_rebalanced, {})
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 88.0)
        self.assertEqual(dataif.pmd_load(pmd2), 98.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd1.del_port('virtport3')
        pmd2.del_port('virtport2')

    # Test case:
    # With two threads from same numa, where each pmd thread is handling
    # two single-queued ports. check whether rebalance is performed.
    # Scope is to check if rxq from a pmd which was a rebalancing pmd
    # before, is assigned other pmd successfully.
    #
    # order of rxqs based on cpu consumption: rxqp1,rxqp2,rxqp3,rxqp4
    # order of pmds for rebalance dryrun: pmd1,pmd2,pmd2,pmd1
    #
    # 1. rxqp1(pmd1) -NOREB-> rxqp1(pmd1)
    #    rxqp2(pmd2) -NOREB-> rxqp2(pmd2)
    #    rxqp3(pmd1) --+--+-> rxqp3(reb_pmd2)
    #    rxqp4(pmd2)
    #
    # 2. rxqp1(pmd1) -NOREB-> rxqp1(pmd1)
    #    rxqp2(pmd2) -NOREB-> rxqp2(pmd2)
    #    rxqp3(pmd1) --+--+-> rxqp3(reb_pmd2)
    #    rxqp4(pmd2) --+--+-> rxqp4(reb_pmd1)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_four_1rxq_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_2rxq(self)
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. two rxqs to be rebalanced.
        self.assertEqual(n_reb_rxq, 2, "two rxqs to be rebalanced")
        # 2. each pmd is updated.
        self.assertNotEqual(pmd_map[self.core1_id], pmd1)
        self.assertNotEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd2.find_port_by_name('virtport2')
        port3 = pmd1.find_port_by_name('virtport3')
        port4 = pmd2.find_port_by_name('virtport4')
        self.assertEqual(port1.rxq_rebalanced, {})
        self.assertEqual(port2.rxq_rebalanced, {})
        self.assertEqual(port3.rxq_rebalanced[0], pmd2.id)
        self.assertEqual(port4.rxq_rebalanced[0], pmd1.id)
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), (96.0 - 26.0 + 25.0))
        self.assertEqual(dataif.pmd_load(pmd2), (90.0 - 25.0 + 26.0))
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        # NOTE(review): unlike the sibling tests, 'virtport3' and 'virtport4'
        # are not deleted here although fx_2pmd_each_2rxq added them and
        # pmd_map is a shared class attribute — confirm whether this leak
        # into subsequent tests is intentional.
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')

    # Test case:
    # With two threads from same numa, where each pmd thread is handling
    # two single-queued ports. Of them, only one rxq is busy while the
    # rest are idle. check whether rebalance is not moving busy rxq
    # from its pmd, while rest (which are idle rxqs) could be repinned
    # accordingly.
    #
    # order of rxqs based on cpu consumption: rxqp4 (and some order on
    #                                         rxqp2,rxqp3,rxqp4)
    # order of pmds for rebalance dryrun: pmd1,pmd2,pmd2,pmd1
    #
    # 1. rxqp1(pmd1)
    #    rxqp2(pmd2)
    #    rxqp3(pmd1)
    #    rxqp4(pmd2) -NOREB-> rxqp4(pmd2)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_four_1rxq_skip_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_2rxq(self)
        # except one rxq, let rest be idle.
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd2.find_port_by_name('virtport2')
        port3 = pmd1.find_port_by_name('virtport3')
        port4 = pmd2.find_port_by_name('virtport4')
        p1rxq = port1.find_rxq_by_id(0)
        p2rxq = port2.find_rxq_by_id(0)
        p3rxq = port3.find_rxq_by_id(0)
        p4rxq = port4.find_rxq_by_id(0)
        for i in range(0, config.ncd_samples_max):
            p1rxq.cpu_cyc[i] = 0
            p2rxq.cpu_cyc[i] = 0
            p3rxq.cpu_cyc[i] = 0
            p4rxq.cpu_cyc[i] = 98
            p1rxq.rx_cyc[i] = 0
            p2rxq.rx_cyc[i] = 0
            p3rxq.rx_cyc[i] = 0
            p4rxq.rx_cyc[i] = 98
        # fix cpu consumption for these pmds.
        for i in range(0, config.ncd_samples_max):
            pmd1.idle_cpu_cyc[i] = (100 * (i + 1))
            pmd1.proc_cpu_cyc[i] = (0 + (0 * i))
            pmd1.rx_cyc[i] = (0 * (i + 1))
        for i in range(0, config.ncd_samples_max):
            pmd2.idle_cpu_cyc[i] = (2 * (i + 1))
            pmd2.proc_cpu_cyc[i] = (98 * (i + 1))
            pmd2.rx_cyc[i] = (98 * (i + 1))
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. no rxq should be rebalanced.
        self.assertEqual(n_reb_rxq, 0, "expected no rebalance")
        # 2. each pmd is not updated.
        self.assertEqual(pmd_map[self.core1_id], pmd1)
        self.assertEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        self.assertEqual(port1.rxq_rebalanced, {})
        self.assertEqual(port2.rxq_rebalanced, {})
        self.assertEqual(port3.rxq_rebalanced, {})
        self.assertEqual(port4.rxq_rebalanced, {})
        # 3.a and dry-run did not break original pinning.
        self.assertEqual(p4rxq.pmd.id, pmd2.id)
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 0)
        self.assertEqual(dataif.pmd_load(pmd2), 98.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')
        pmd1.del_port('virtport3')
        pmd2.del_port('virtport4')

    # Test case:
    # With two threads from same numa, where each pmd thread is handling
    # two single-queued ports. Of them, all are busy. check whether
    # rebalance is skipped.
    #
    # order of rxqs based on cpu consumption: N/A
    # order of pmds for rebalance dryrun: N/A
    #
    # 1. rxqp1(pmd1)
    #    rxqp2(pmd2)
    #    rxqp3(pmd1)
    #    rxqp4(pmd2)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_4busy_1rxq_skip_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        # create rxq
        fx_2pmd_each_2rxq(self)
        # fix cpu consumption for these pmds.
        for i in range(0, config.ncd_samples_max):
            pmd1.idle_cpu_cyc[i] = (4 * (i + 1))
            pmd1.proc_cpu_cyc[i] = (96 * (i + 1))
            pmd1.rx_cyc[i] = (96 * (i + 1))
        for i in range(0, config.ncd_samples_max):
            pmd2.idle_cpu_cyc[i] = (4 * (i + 1))
            pmd2.proc_cpu_cyc[i] = (96 * (i + 1))
            pmd2.rx_cyc[i] = (96 * (i + 1))
        # all rxqs are busy
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd2.find_port_by_name('virtport2')
        port3 = pmd1.find_port_by_name('virtport3')
        port4 = pmd2.find_port_by_name('virtport4')
        p1rxq = port1.find_rxq_by_id(0)
        p2rxq = port2.find_rxq_by_id(0)
        p3rxq = port3.find_rxq_by_id(0)
        p4rxq = port4.find_rxq_by_id(0)
        for i in range(0, config.ncd_samples_max):
            p1rxq.cpu_cyc[i] = 70
            p2rxq.cpu_cyc[i] = 60
            p3rxq.cpu_cyc[i] = 26
            p4rxq.cpu_cyc[i] = 36
            p1rxq.rx_cyc[i] = 70
            p2rxq.rx_cyc[i] = 60
            p3rxq.rx_cyc[i] = 26
            p4rxq.rx_cyc[i] = 36
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd2.find_port_by_name('virtport2')
        port3 = pmd1.find_port_by_name('virtport3')
        port4 = pmd2.find_port_by_name('virtport4')
        # 1. rebalance must be skipped.
        self.assertEqual(n_reb_rxq, -1, "skip rebalance")
        # 2. each pmd is not updated.
        self.assertEqual(pmd_map[self.core1_id], pmd1)
        self.assertEqual(pmd_map[self.core2_id], pmd2)
        # 3. check rxq map after dryrun.
        self.assertEqual(port1.rxq_rebalanced, {})
        self.assertEqual(port2.rxq_rebalanced, {})
        self.assertEqual(port3.rxq_rebalanced, {})
        self.assertEqual(port4.rxq_rebalanced, {})
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), 96.0)
        self.assertEqual(dataif.pmd_load(pmd2), 96.0)
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')
        pmd1.del_port('virtport3')
        pmd2.del_port('virtport4')


# Fixture:
# Create four pmd thread objects where in, each pmd has two single-queued
# ports.
def fx_4pmd_each_2rxq(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    pmd2 = testobj.pmd_map[testobj.core2_id]
    pmd3 = testobj.pmd_map[testobj.core3_id]
    pmd4 = testobj.pmd_map[testobj.core4_id]
    # one dummy port is required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    port3_name = 'virtport3'
    port4_name = 'virtport4'
    port5_name = 'virtport5'
    port6_name = 'virtport6'
    port7_name = 'virtport7'
    port8_name = 'virtport8'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    dataif.make_dataif_port(port3_name)
    dataif.make_dataif_port(port4_name)
    dataif.make_dataif_port(port5_name)
    dataif.make_dataif_port(port6_name)
    dataif.make_dataif_port(port7_name)
    dataif.make_dataif_port(port8_name)
    # add port object into pmd.
    fx_port1 = pmd1.add_port(port1_name)
    fx_port1.numa_id = pmd1.numa_id
    fx_port2 = pmd2.add_port(port2_name)
    fx_port2.numa_id = pmd2.numa_id
    fx_port3 = pmd3.add_port(port3_name)
    fx_port3.numa_id = pmd3.numa_id
    fx_port4 = pmd4.add_port(port4_name)
    fx_port4.numa_id = pmd4.numa_id
    fx_port5 = pmd1.add_port(port5_name)
    fx_port5.numa_id = pmd1.numa_id
    fx_port6 = pmd2.add_port(port6_name)
    fx_port6.numa_id = pmd2.numa_id
    fx_port7 = pmd3.add_port(port7_name)
    fx_port7.numa_id = pmd3.numa_id
    fx_port8 = pmd4.add_port(port8_name)
    fx_port8.numa_id = pmd4.numa_id
    # add a dummy rxq into port.
    fx_p1rxq = fx_port1.add_rxq(0)
    fx_p1rxq.pmd = pmd1
    fx_p2rxq = fx_port2.add_rxq(0)
    fx_p2rxq.pmd = pmd2
    fx_p3rxq = fx_port3.add_rxq(0)
    fx_p3rxq.pmd = pmd3
    fx_p4rxq = fx_port4.add_rxq(0)
    fx_p4rxq.pmd = pmd4
    fx_p5rxq = fx_port5.add_rxq(0)
    fx_p5rxq.pmd = pmd1
    fx_p6rxq = fx_port6.add_rxq(0)
    fx_p6rxq.pmd = pmd2
    fx_p7rxq = fx_port7.add_rxq(0)
    fx_p7rxq.pmd = pmd3
    fx_p8rxq = fx_port8.add_rxq(0)
    fx_p8rxq.pmd = pmd4
    # add some cpu consumption for these rxqs.
    # order of rxqs based on cpu consumption: rxqp1,rxqp2,..rxqp8
    for i in range(0, config.ncd_samples_max):
        fx_p1rxq.cpu_cyc[i] = 76
        fx_p1rxq.rx_cyc[i] = 76
        fx_p2rxq.cpu_cyc[i] = 75
        fx_p2rxq.rx_cyc[i] = 75
        fx_p3rxq.cpu_cyc[i] = 74
        fx_p3rxq.rx_cyc[i] = 74
        fx_p4rxq.cpu_cyc[i] = 73
        fx_p4rxq.rx_cyc[i] = 73
        fx_p5rxq.cpu_cyc[i] = 20
        fx_p5rxq.rx_cyc[i] = 20
        fx_p6rxq.cpu_cyc[i] = 15
        fx_p6rxq.rx_cyc[i] = 15
        fx_p7rxq.cpu_cyc[i] = 11
        fx_p7rxq.rx_cyc[i] = 11
        fx_p8rxq.cpu_cyc[i] = 7
        fx_p8rxq.rx_cyc[i] = 7


class TestRebalDryrun_FourPmd(TestCase):
    """
    Test rebalance for one or more rxq handled by four pmds.
    """

    rebalance_dryrun = dataif.rebalance_dryrun_by_cyc
    pmd_map = dict()
    core1_id = 0
    core2_id = 1
    core3_id = 4
    core4_id = 5

    # setup test environment
    def setUp(self):
        util.Memoize.forgot = True
        # turn off limited info shown in assert failure for pmd object.
        self.maxDiff = None
        dataif.Context.nlog = NlogNoop()
        # create one pmd object.
        fx_pmd1 = dataif.Dataif_Pmd(self.core1_id)
        fx_pmd2 = dataif.Dataif_Pmd(self.core2_id)
        fx_pmd3 = dataif.Dataif_Pmd(self.core3_id)
        fx_pmd4 = dataif.Dataif_Pmd(self.core4_id)
        # let it be in numa 0.
        fx_pmd1.numa_id = 0
        fx_pmd2.numa_id = 0
        fx_pmd3.numa_id = 0
        fx_pmd4.numa_id = 0
        # add some cpu consumption for these pmds.
        for i in range(0, config.ncd_samples_max):
            fx_pmd1.idle_cpu_cyc[i] = (4 * (i + 1))
            fx_pmd1.proc_cpu_cyc[i] = (96 * (i + 1))
            fx_pmd1.rx_cyc[i] = (96 * (i + 1))
        fx_pmd1.cyc_idx = config.ncd_samples_max - 1
        for i in range(0, config.ncd_samples_max):
            fx_pmd2.idle_cpu_cyc[i] = (10 * (i + 1))
            fx_pmd2.proc_cpu_cyc[i] = (90 * (i + 1))
            fx_pmd2.rx_cyc[i] = (90 * (i + 1))
        fx_pmd2.cyc_idx = config.ncd_samples_max - 1
        for i in range(0, config.ncd_samples_max):
            fx_pmd3.idle_cpu_cyc[i] = (15 * (i + 1))
            fx_pmd3.proc_cpu_cyc[i] = (85 * (i + 1))
            fx_pmd3.rx_cyc[i] = (85 * (i + 1))
        fx_pmd3.cyc_idx = config.ncd_samples_max - 1
        for i in range(0, config.ncd_samples_max):
            fx_pmd4.idle_cpu_cyc[i] = (20 * (i + 1))
            fx_pmd4.proc_cpu_cyc[i] = (80 * (i + 1))
            fx_pmd4.rx_cyc[i] = (80 * (i + 1))
        fx_pmd4.cyc_idx = config.ncd_samples_max - 1
        self.pmd_map[self.core1_id] = fx_pmd1
        self.pmd_map[self.core2_id] = fx_pmd2
        self.pmd_map[self.core3_id] = fx_pmd3
        self.pmd_map[self.core4_id] = fx_pmd4
        return

    # Test case:
    # With four threads from same numa, where each pmd thread is handling
    # two single-queued ports. check whether rebalance is performed.
    # Scope is to check if rxq from a pmd which was a rebalancing pmd
    # before, is assigned other pmd successfully.
    #
    # order of rxqs based on cpu consumption: rxqp1,rxqp2,rxqp3,rxqp4
    # order of pmds for rebalance dryrun: pmd1,pmd2,pmd3,pmd4
    #
    # 1. rxqp1(pmd1) -NOREB-> rxqp1(pmd1)
    #    rxqp2(pmd2) -NOREB-> rxqp2(pmd2)
    #    rxqp3(pmd3) -NOREB-> rxqp3(pmd3)
    #    rxqp4(pmd4) -NOREB-> rxqp4(pmd4)
    #    rxqp5(pmd1)
    #    rxqp6(pmd2)
    #    rxqp7(pmd3)
    #    rxqp8(pmd4)
    #
    # 2. rxqp1(pmd1) -NOREB-> rxqp1(pmd1)
    #    rxqp2(pmd2) -NOREB-> rxqp2(pmd2)
    #    rxqp3(pmd3) -NOREB-> rxqp3(pmd3)
    #    rxqp4(pmd4) -NOREB-> rxqp4(pmd4)
    #    rxqp5(pmd1) --+--+-> rxqp5(reb_pmd4)
    #    rxqp6(pmd2) --+--+-> rxqp6(reb_pmd3)
    #    rxqp7(pmd3) --+--+-> rxqp7(reb_pmd2)
    #    rxqp8(pmd4) --+--+-> rxqp8(reb_pmd1)
    #
    @mock.patch('netcontrold.lib.util.open')
    def test_eight_1rxq_lnuma(self, mock_open):
        mock_open.side_effect = [
            mock.mock_open(read_data=_FX_4X2CPU_INFO).return_value
        ]
        # set same numa for pmds
        pmd1 = self.pmd_map[self.core1_id]
        pmd2 = self.pmd_map[self.core2_id]
        pmd3 = self.pmd_map[self.core3_id]
        pmd4 = self.pmd_map[self.core4_id]
        pmd1.numa_id = 0
        pmd2.numa_id = 0
        pmd3.numa_id = 0
        pmd4.numa_id = 0
        # create rxq
        fx_4pmd_each_2rxq(self)
        # update pmd load values
        dataif.update_pmd_load(self.pmd_map)
        # copy original pmd objects
        pmd_map = copy.deepcopy(self.pmd_map)
        # test dryrun
        n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)
        # validate results
        # 1. four rxqs to be rebalanced.
        self.assertEqual(n_reb_rxq, 4, "four rxqs to be rebalanced")
        # 2. each pmd is updated.
        self.assertNotEqual(pmd_map[self.core1_id], pmd1)
        self.assertNotEqual(pmd_map[self.core2_id], pmd2)
        self.assertNotEqual(pmd_map[self.core3_id], pmd3)
        self.assertNotEqual(pmd_map[self.core4_id], pmd4)
        # 3. check rxq map after dryrun.
        port1 = pmd1.find_port_by_name('virtport1')
        port2 = pmd2.find_port_by_name('virtport2')
        port3 = pmd3.find_port_by_name('virtport3')
        port4 = pmd4.find_port_by_name('virtport4')
        port5 = pmd1.find_port_by_name('virtport5')
        port6 = pmd2.find_port_by_name('virtport6')
        port7 = pmd3.find_port_by_name('virtport7')
        port8 = pmd4.find_port_by_name('virtport8')
        self.assertEqual(port1.rxq_rebalanced, {})
        self.assertEqual(port2.rxq_rebalanced, {})
        self.assertEqual(port3.rxq_rebalanced, {})
        self.assertEqual(port4.rxq_rebalanced, {})
        self.assertEqual(port5.rxq_rebalanced[0], pmd4.id)
        self.assertEqual(port6.rxq_rebalanced[0], pmd3.id)
        self.assertEqual(port7.rxq_rebalanced[0], pmd2.id)
        self.assertEqual(port8.rxq_rebalanced[0], pmd1.id)
        # 4. check pmd load
        self.assertEqual(dataif.pmd_load(pmd1), (96.0 - 20.0 + 7.0))
        self.assertEqual(dataif.pmd_load(pmd2), (90.0 - 15.0 + 11.0))
        self.assertEqual(dataif.pmd_load(pmd3), (85.0 - 11.0 + 15.0))
        self.assertEqual(dataif.pmd_load(pmd4), (80.0 - 7.0 + 20.0))
        # del port object from pmd.
        # TODO: create fx_ post deletion routine for clean up
        # NOTE(review): 'virtport5'..'virtport8' added by the fixture are not
        # deleted here — confirm whether this leak into subsequent tests is
        # intentional.
        pmd1.del_port('virtport1')
        pmd2.del_port('virtport2')
        pmd3.del_port('virtport3')
        pmd4.del_port('virtport4')


# Fixture:
# Create two pmd thread objects per numa where in, one pmd has three
# single-queued ports, while the other is idle (without any port/rxq).
def fx_2pmd_one_empty_per_numa(testobj):
    # retrieve pmd object.
    pmd1 = testobj.pmd_map[testobj.core1_id]
    pmd3 = testobj.pmd_map[testobj.core3_id]
    # dummy ports required for this test.
    port1_name = 'virtport1'
    port2_name = 'virtport2'
    port3_name = 'virtport3'
    port4_name = 'virtport4'
    port5_name = 'virtport5'
    port6_name = 'virtport6'
    # create port class of name 'virtport'.
    dataif.make_dataif_port(port1_name)
    dataif.make_dataif_port(port2_name)
    dataif.make_dataif_port(port3_name)
    dataif.make_dataif_port(port4_name)
    dataif.make_dataif_port(port5_name)
    dataif.make_dataif_port(port6_name)
    # add port object into pmd.
    fx_port1 = pmd1.add_port(port1_name)
    fx_port1.numa_id = pmd1.numa_id
    fx_port2 = pmd1.add_port(port2_name)
    fx_port2.numa_id = pmd1.numa_id
    fx_port3 = pmd1.add_port(port3_name)
    fx_port3.numa_id = pmd1.numa_id
    fx_port4 = pmd3.add_port(port4_name)
    fx_port4.numa_id = pmd3.numa_id
    fx_port5 = pmd3.add_port(port5_name)
    fx_port5.numa_id = pmd3.numa_id
    fx_port6 = pmd3.add_port(port6_name)
    fx_port6.numa_id = pmd3.numa_id
    # add a dummy rxq into port.
    fx_p1rxq = fx_port1.add_rxq(0)
    fx_p1rxq.pmd = pmd1
    fx_p2rxq = fx_port2.add_rxq(0)
    fx_p2rxq.pmd = pmd1
    fx_p3rxq = fx_port3.add_rxq(0)
    fx_p3rxq.pmd = pmd1
    fx_p4rxq = fx_port4.add_rxq(0)
    fx_p4rxq.pmd = pmd3
    fx_p5rxq = fx_port5.add_rxq(0)
    fx_p5rxq.pmd = pmd3
    fx_p6rxq = fx_port6.add_rxq(0)
    fx_p6rxq.pmd = pmd3
    # add some cpu consumption for these rxqs.
    # order of rxqs based on cpu consumption: rxqp2,rxqp1,rxqp3,
    #                                         rxqp5,rxqp4,rxqp6
    for i in range(0, config.ncd_samples_max):
        fx_p2rxq.cpu_cyc[i] = 66
        fx_p1rxq.cpu_cyc[i] = 20
        fx_p3rxq.cpu_cyc[i] = 10
        fx_p5rxq.cpu_cyc[i] = 66
        fx_p4rxq.cpu_cyc[i] = 20
        fx_p6rxq.cpu_cyc[i] = 10
        fx_p2rxq.rx_cyc[i] = 66
        fx_p1rxq.rx_cyc[i] = 20
        fx_p3rxq.rx_cyc[i] = 10
        fx_p5rxq.rx_cyc[i] = 66
        fx_p4rxq.rx_cyc[i] = 20
        fx_p6rxq.rx_cyc[i] = 10


# NOTE(review): this chunk ends mid-definition (the class below is truncated
# inside setUp, at "self.maxDiff = ...").  It is preserved commented-out
# rather than guessed at; restore the complete class from the full file
# when merging.
#
# class TestRebalDryrun_FourPmd_Numa(TestCase):
#     """
#     Test rebalance for one or more rxq handled by four pmds.
#     """
#     rebalance_dryrun = dataif.rebalance_dryrun_by_cyc
#     pmd_map = dict()
#     core1_id = 0
#     core2_id = 1
#     core3_id = 6
#     core4_id = 7
#
#     # setup test environment
#     def setUp(self):
#         util.Memoize.forgot = True
#         # turn off limited info shown in assert failure for pmd object.
#         self.maxDiff = <truncated in this chunk>
None1137 dataif.Context.nlog = NlogNoop()1138 # create one pmd object.1139 fx_pmd1 = dataif.Dataif_Pmd(self.core1_id)1140 fx_pmd2 = dataif.Dataif_Pmd(self.core2_id)1141 fx_pmd3 = dataif.Dataif_Pmd(self.core3_id)1142 fx_pmd4 = dataif.Dataif_Pmd(self.core4_id)1143 # let it be in numa 0.1144 fx_pmd1.numa_id = 01145 fx_pmd2.numa_id = 01146 fx_pmd3.numa_id = 11147 fx_pmd4.numa_id = 11148 # add some cpu consumption for these pmds.1149 for i in range(0, config.ncd_samples_max):1150 fx_pmd1.idle_cpu_cyc[i] = (4 * (i + 1))1151 fx_pmd1.proc_cpu_cyc[i] = (96 * (i + 1))1152 fx_pmd1.rx_cyc[i] = (96 * (i + 1))1153 fx_pmd1.cyc_idx = config.ncd_samples_max - 11154 for i in range(0, config.ncd_samples_max):1155 fx_pmd2.idle_cpu_cyc[i] = (10 * (i + 1))1156 fx_pmd2.proc_cpu_cyc[i] = (90 * (i + 1))1157 fx_pmd2.rx_cyc[i] = (90 * (i + 1))1158 fx_pmd2.cyc_idx = config.ncd_samples_max - 11159 for i in range(0, config.ncd_samples_max):1160 fx_pmd3.idle_cpu_cyc[i] = (15 * (i + 1))1161 fx_pmd3.proc_cpu_cyc[i] = (85 * (i + 1))1162 fx_pmd3.rx_cyc[i] = (85 * (i + 1))1163 fx_pmd3.cyc_idx = config.ncd_samples_max - 11164 for i in range(0, config.ncd_samples_max):1165 fx_pmd4.idle_cpu_cyc[i] = (20 * (i + 1))1166 fx_pmd4.proc_cpu_cyc[i] = (80 * (i + 1))1167 fx_pmd4.rx_cyc[i] = (80 * (i + 1))1168 fx_pmd4.cyc_idx = config.ncd_samples_max - 11169 self.pmd_map[self.core1_id] = fx_pmd11170 self.pmd_map[self.core2_id] = fx_pmd21171 self.pmd_map[self.core3_id] = fx_pmd31172 self.pmd_map[self.core4_id] = fx_pmd41173 return1174 # Test case:1175 # With two threads per numa, where one pmd thread is handling1176 # two single-queued ports, while the other pmd is empty,1177 # check whether rebalance is performed in each numa.1178 # Scope is to check if only one rxq is moved to empty pmd1179 # within numa affinity.1180 #1181 # order of rxqs based on cpu consumption: rxqp2,rxqp1,rxqp5,rxqp41182 # order of pmds for rebalance dryrun: pmd1N0,pmd3N1,pmd2N0,pmd4N11183 #1184 # 1. 
rxqp2(pmd1N0) -NOREB-> rxqp2(pmd1N0)1185 # rxqp1(pmd1N0)1186 # - (pmd2N0)1187 #1188 # 2 rxqp5(pmd3N1) -NOREB-> rxqp5(pmd3N0)1189 # rxqp4(pmd3N1)1190 # - (pmd4N1)1191 #1192 # 3. rxqp2(pmd1N0) -NOREB-> rxqp2(pmd1N0)1193 # rxqp1(pmd1N0) --+--+-> rxqp1(reb_pmd2N0)1194 #1195 # 4. rxqp5(pmd3N1) -NOREB-> rxqp5(pmd3N1)1196 # rxqp4(pmd3N1) --+--+-> rxqp4(reb_pmd4N1)1197 #1198 @mock.patch('netcontrold.lib.util.open')1199 def test_two_1rxq_with_empty_per_numa(self, mock_open):1200 mock_open.side_effect = [1201 mock.mock_open(read_data=_FX_4X2CPU_INFO).return_value1202 ]1203 # set numa for pmds1204 self.core1_id = 01205 self.core2_id = 11206 self.core3_id = 61207 self.core4_id = 71208 pmd1 = self.pmd_map[self.core1_id]1209 pmd2 = self.pmd_map[self.core2_id]1210 pmd3 = self.pmd_map[self.core3_id]1211 pmd4 = self.pmd_map[self.core4_id]1212 # create rxq1213 fx_2pmd_one_empty_per_numa(self)1214 # delete excess ports in pmds1215 pmd1.del_port('virtport3')1216 pmd3.del_port('virtport6')1217 # update pmd stats for 2 and 4 to be idle.1218 for i in range(0, config.ncd_samples_max):1219 pmd2.idle_cpu_cyc[i] = (100 * (i + 1))1220 pmd2.proc_cpu_cyc[i] = (0 * (i + 1))1221 pmd2.rx_cyc[i] = (0 * (i + 1))1222 for i in range(0, config.ncd_samples_max):1223 pmd4.idle_cpu_cyc[i] = (100 * (i + 1))1224 pmd4.proc_cpu_cyc[i] = (0 * (i + 1))1225 pmd4.rx_cyc[i] = (0 * (i + 1))1226 # update rxq stats for ports in pmd 1 and pmd 31227 port1 = pmd1.find_port_by_name('virtport1')1228 port2 = pmd1.find_port_by_name('virtport2')1229 port4 = pmd3.find_port_by_name('virtport4')1230 port5 = pmd3.find_port_by_name('virtport5')1231 p1rxq = port1.find_rxq_by_id(0)1232 p2rxq = port2.find_rxq_by_id(0)1233 p4rxq = port4.find_rxq_by_id(0)1234 p5rxq = port5.find_rxq_by_id(0)1235 for i in range(0, config.ncd_samples_max):1236 p2rxq.cpu_cyc[i] = 661237 p2rxq.rx_cyc[i] = 661238 p1rxq.cpu_cyc[i] = 301239 p1rxq.rx_cyc[i] = 301240 p5rxq.cpu_cyc[i] = 661241 p5rxq.rx_cyc[i] = 661242 p4rxq.cpu_cyc[i] = 301243 p4rxq.rx_cyc[i] = 
301244 # update pmd load values1245 dataif.update_pmd_load(self.pmd_map)1246 # copy original pmd objects1247 pmd_map = copy.deepcopy(self.pmd_map)1248 # test dryrun1249 n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)1250 # validate results1251 # 1. all two rxqs be rebalanced.1252 self.assertEqual(n_reb_rxq, 2, "two rxqs to be rebalanced")1253 # 2. each pmd is updated.1254 self.assertNotEqual(pmd_map[self.core1_id], pmd1)1255 self.assertNotEqual(pmd_map[self.core2_id], pmd2)1256 self.assertNotEqual(pmd_map[self.core3_id], pmd3)1257 self.assertNotEqual(pmd_map[self.core4_id], pmd4)1258 # 3. check rxq map after dryrun.1259 port2reb = pmd2.find_port_by_name('virtport1')1260 port4reb = pmd4.find_port_by_name('virtport4')1261 # 3.a rxqp2 remains in pmd11262 self.assertEqual(port2.rxq_rebalanced, {})1263 self.assertEqual(port2.find_rxq_by_id(0).pmd.id, pmd1.id)1264 # 3.b rxqp3 remains in pmd31265 self.assertEqual(port5.rxq_rebalanced, {})1266 self.assertEqual(port5.find_rxq_by_id(0).pmd.id, pmd3.id)1267 # 3.c rxqp1 moves from pmd1 to pmd21268 self.assertEqual(port1.rxq_rebalanced[0], pmd2.id)1269 self.assertIsNone(port1.find_rxq_by_id(0))1270 # 3.c.0 and dry-run did not break original pinning.1271 rxqp2reb = port2reb.find_rxq_by_id(0)1272 self.assertEqual(rxqp2reb.pmd.id, pmd1.id)1273 # 3.d rxqp4 moves from pmd3 to pmd41274 self.assertEqual(port4.rxq_rebalanced[0], pmd4.id)1275 self.assertIsNone(port4.find_rxq_by_id(0))1276 # 3.d.0 and dry-run did not break original pinning.1277 rxqp4reb = port4reb.find_rxq_by_id(0)1278 self.assertEqual(rxqp4reb.pmd.id, pmd3.id)1279 # 4. 
check pmd load1280 self.assertEqual(dataif.pmd_load(pmd1), (96.0 - 30.0))1281 self.assertEqual(dataif.pmd_load(pmd2), 30.0)1282 self.assertEqual(dataif.pmd_load(pmd3), (85.0 - 30.0))1283 self.assertEqual(dataif.pmd_load(pmd4), 30.0)1284 # del port object from pmd.1285 # TODO: create fx_ post deletion routine for clean up1286 pmd1.del_port('virtport1')1287 pmd1.del_port('virtport2')1288 pmd3.del_port('virtport5')1289 pmd3.del_port('virtport4')1290 # Test case:1291 # With two threads per numa, where one pmd thread is handling1292 # three single-queued ports in first numa, with the other pmd being1293 # idle, at the same time pmds in other numa are entirely idle.1294 # Check whether rebalance is performed in only first numa.1295 # Scope is to check if only one rxq is moved to empty pmd1296 # within numa affinity.1297 #1298 # order of rxqs based on cpu consumption: rxqp2,rxqp1,rxqp31299 # order of pmds for rebalance dryrun: pmd1N0,pmd3N1,pmd2N0,pmd4N11300 #1301 # 1. rxqp2(pmd1N0) -NOREB-> rxqp2(pmd1N0)1302 # rxqp1(pmd1N0)1303 # rxqp3(pmd1N0)1304 # - (pmd2N0)1305 #1306 # - (pmd3N1)1307 # - (pmd4N1)1308 #1309 # 2. rxqp2(pmd1N0) -NOREB-> rxqp2(pmd1N0)1310 # rxqp1(pmd1N0) --+--+-> rxqp1(reb_pmd2N0)1311 # rxqp3(pmd1N0)1312 #1313 # - (pmd3N1)1314 # - (pmd4N1)1315 #1316 # 3. 
rxqp2(pmd1N0) -NOREB-> rxqp2(pmd1N0)1317 # rxqp1(pmd1N0) --+--+-> rxqp1(reb_pmd2N0)1318 # rxqp3(pmd1N0) --+--+-> rxqp3(reb_pmd2N0)1319 #1320 # - (pmd3N1)1321 # - (pmd4N1)1322 #1323 @mock.patch('netcontrold.lib.util.open')1324 def test_two_1rxq_with_empty_one_numa(self, mock_open):1325 mock_open.side_effect = [1326 mock.mock_open(read_data=_FX_4X2CPU_INFO).return_value1327 ]1328 # set numa for pmds1329 self.core1_id = 01330 self.core2_id = 11331 self.core3_id = 61332 self.core4_id = 71333 pmd1 = self.pmd_map[self.core1_id]1334 pmd2 = self.pmd_map[self.core2_id]1335 pmd3 = self.pmd_map[self.core3_id]1336 pmd4 = self.pmd_map[self.core4_id]1337 # create rxq1338 fx_2pmd_one_empty_per_numa(self)1339 # empty pmd threads in second numa1340 pmd3.del_port('virtport4')1341 pmd3.del_port('virtport5')1342 pmd3.del_port('virtport6')1343 # update pmd stats for 2, 3 and 4 to be idle.1344 for i in range(0, config.ncd_samples_max):1345 pmd2.idle_cpu_cyc[i] = (100 * (i + 1))1346 pmd2.proc_cpu_cyc[i] = (0 * (i + 1))1347 pmd2.rx_cyc[i] = (0 * (i + 1))1348 pmd3.idle_cpu_cyc[i] = (100 * (i + 1))1349 pmd3.proc_cpu_cyc[i] = (0 * (i + 1))1350 pmd3.rx_cyc[i] = (0 * (i + 1))1351 pmd4.idle_cpu_cyc[i] = (100 * (i + 1))1352 pmd4.proc_cpu_cyc[i] = (0 * (i + 1))1353 pmd4.rx_cyc[i] = (0 * (i + 1))1354 # update pmd load values1355 dataif.update_pmd_load(self.pmd_map)1356 # copy original pmd objects1357 pmd_map = copy.deepcopy(self.pmd_map)1358 # test dryrun1359 n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)1360 # validate results1361 # 1. two rxqs be rebalanced in numa 1.1362 self.assertEqual(n_reb_rxq, 2, "two rxqs to be rebalanced")1363 # 2. each pmd is updated, except numa 2.1364 self.assertNotEqual(pmd_map[self.core1_id], pmd1)1365 self.assertNotEqual(pmd_map[self.core2_id], pmd2)1366 self.assertEqual(pmd_map[self.core3_id], pmd3)1367 self.assertEqual(pmd_map[self.core4_id], pmd4)1368 # 3. 
check rxq map after dryrun.1369 port1 = pmd1.find_port_by_name('virtport1')1370 port2 = pmd1.find_port_by_name('virtport2')1371 port3 = pmd1.find_port_by_name('virtport3')1372 port1reb = pmd2.find_port_by_name('virtport1')1373 port3reb = pmd2.find_port_by_name('virtport3')1374 # 3.a rxqp2 remains in pmd11375 self.assertEqual(port2.rxq_rebalanced, {})1376 self.assertEqual(port2.find_rxq_by_id(0).pmd.id, pmd1.id)1377 # 3.b rxqp1 moves from pmd1 to pmd21378 self.assertEqual(port1.rxq_rebalanced[0], pmd2.id)1379 self.assertIsNone(port1.find_rxq_by_id(0))1380 # 3.b.0 and dry-run did not break original pinning.1381 rxqp1reb = port1reb.find_rxq_by_id(0)1382 self.assertEqual(rxqp1reb.pmd.id, pmd1.id)1383 # 3.c rxqp3 moves from pmd1 to pmd21384 self.assertEqual(port3.rxq_rebalanced[0], pmd2.id)1385 self.assertIsNone(port3.find_rxq_by_id(0))1386 # 3.c.0 and dry-run did not break original pinning.1387 rxqp3reb = port3reb.find_rxq_by_id(0)1388 self.assertEqual(rxqp3reb.pmd.id, pmd1.id)1389 # 3.d no port moved into numa 11390 self.assertEqual(pmd3.count_rxq(), 0)1391 self.assertEqual(pmd4.count_rxq(), 0)1392 # 4. 
check pmd load1393 self.assertEqual(dataif.pmd_load(pmd1), (96.0 - 20.0 - 10.0))1394 self.assertEqual(dataif.pmd_load(pmd2), 30.0)1395 self.assertEqual(dataif.pmd_load(pmd3), 0)1396 self.assertEqual(dataif.pmd_load(pmd4), 0)1397 # del port object from pmd.1398 # TODO: create fx_ post deletion routine for clean up1399 pmd1.del_port('virtport1')1400 pmd1.del_port('virtport2')1401class TestRebalDryrunIQ_OnePmd(TestRebalDryrun_OnePmd):1402 """1403 Test rebalance for one or more rxq handled by one pmd.1404 """1405 rebalance_dryrun = dataif.rebalance_dryrun_by_iq1406 pmd_map = dict()1407 core_id = 01408class TestRebalDryrunIQ_TwoPmd(TestRebalDryrun_TwoPmd):1409 """1410 Test rebalance for one or more rxq handled by two pmds.1411 """1412 rebalance_dryrun = dataif.rebalance_dryrun_by_iq1413 pmd_map = dict()1414 core1_id = 01415 core2_id = 11416 @pytest.mark.skip(reason="not applicable")1417 def test_four_1rxq_lnuma(self, mock_open):1418 ...1419 @pytest.mark.skip(reason="not applicable")1420 def test_four_1rxq_skip_lnuma(self, mock_open):1421 ...1422 @pytest.mark.skip(reason="not applicable")1423 def test_two_1p2rxq_lnuma(self, mock_open):1424 ...1425 @pytest.mark.skip(reason="not applicable")1426 def test_two_1p2rxq_lnuma_norb(self, mock_open):1427 ...1428class TestRebalDryrunIQ_FourPmd(TestRebalDryrun_FourPmd):1429 """1430 Test rebalance for one or more rxq handled by four pmds.1431 """1432 rebalance_dryrun = dataif.rebalance_dryrun_by_iq1433 pmd_map = dict()1434 core1_id = 01435 core2_id = 11436 core3_id = 41437 core4_id = 51438 @pytest.mark.skip(reason="not applicable")1439 def test_eight_1rxq_lnuma(self, mock_open):1440 ...1441class TestRebalDryrunIQ_FourPmd_Numa(TestRebalDryrun_FourPmd_Numa):1442 """1443 Test rebalance for one or more rxq handled by four pmds.1444 """1445 rebalance_dryrun = dataif.rebalance_dryrun_by_cyc1446 pmd_map = dict()1447 core1_id = 01448 core2_id = 11449 core3_id = 61450 core4_id = 71451class TestRebalDryrunVarLoad_TwoPmd(TestCase):1452 """1453 
Test rebalance for one or more rxq handled by two pmds.1454 """1455 rebalance_dryrun = dataif.rebalance_dryrun_by_cyc1456 pmd_map = dict()1457 core1_id = 01458 core2_id = 11459 # setup test environment1460 def setUp(self):1461 util.Memoize.forgot = True1462 # turn off limited info shown in assert failure for pmd object.1463 self.maxDiff = None1464 dataif.Context.nlog = NlogNoop()1465 # create one pmd object.1466 fx_pmd1 = dataif.Dataif_Pmd(self.core1_id)1467 fx_pmd2 = dataif.Dataif_Pmd(self.core2_id)1468 # let it be in numa 0.1469 fx_pmd1.numa_id = 01470 fx_pmd2.numa_id = 01471 # add some cpu consumption for these pmds.1472 # pmd1 to be 95% busy i.e using 5*96 cycles out of 5*100 cycles1473 for i in range(0, config.ncd_samples_max):1474 fx_pmd1.idle_cpu_cyc[i] = (4 * (i + 1))1475 fx_pmd1.proc_cpu_cyc[i] = (96 * (i + 1))1476 fx_pmd1.rx_cyc[i] = (96 * (i + 1))1477 fx_pmd1.cyc_idx = config.ncd_samples_max - 11478 # pmd2 to be 90% busy i.e using 5*90 cycles out of 5*100 cycles1479 for i in range(0, config.ncd_samples_max):1480 fx_pmd2.idle_cpu_cyc[i] = (10 * (i + 1))1481 fx_pmd2.proc_cpu_cyc[i] = (90 * (i + 1))1482 fx_pmd2.rx_cyc[i] = (90 * (i + 1))1483 fx_pmd2.cyc_idx = config.ncd_samples_max - 11484 self.pmd_map[self.core1_id] = fx_pmd11485 self.pmd_map[self.core2_id] = fx_pmd21486 return1487 # Test case:1488 # With two threads from same numa, where one pmd thread is handling1489 # two single-queued ports, while the other pmd is empty,1490 # check whether rebalance is performed.1491 # Scope is to check if only one rxq is moved to empty pmd.1492 #1493 # order of rxqs based on cpu consumption: rxqp2,rxqp11494 # order of pmds for rebalance dryrun: pmd1,pmd21495 #1496 # 1. rxqp2(pmd1) -NOREB-> rxqp2(pmd1)1497 # rxqp1(pmd1)1498 # - (pmd2)1499 #1500 # 2. 
rxqp2(pmd1) -NOREB-> rxqp2(pmd1)1501 # rxqp1(pmd1) --+--+-> rxqp1(reb_pmd2)1502 #1503 @mock.patch('netcontrold.lib.util.open')1504 def test_two_1rxq_with_empty_lnuma(self, mock_open):1505 mock_open.side_effect = [1506 mock.mock_open(read_data=_FX_CPU_INFO).return_value1507 ]1508 # set same numa for pmds1509 pmd1 = self.pmd_map[self.core1_id]1510 pmd2 = self.pmd_map[self.core2_id]1511 pmd1.numa_id = 01512 pmd2.numa_id = 01513 # let pmd2 be idle1514 for i in range(0, config.ncd_samples_max):1515 pmd2.idle_cpu_cyc[i] = (100 * (i + 1))1516 pmd2.proc_cpu_cyc[i] = (0 * (i + 1))1517 pmd2.rx_cyc[i] = (0 * (i + 1))1518 # create rxq1519 fx_2pmd_one_empty(self)1520 # update rxq stats1521 port1 = pmd1.find_port_by_name('virtport1')1522 port2 = pmd1.find_port_by_name('virtport2')1523 p1rxq = port1.find_rxq_by_id(0)1524 p2rxq = port2.find_rxq_by_id(0)1525 # fluctuate rxq consumption within 96% load in pmd11526 # p1rxq consumption is 36 % in 5*100 cyc ie 1801527 # p2rxq consumption is 60 % in 5*100 cyc ie 3001528 p1rxq.rx_cyc = [0, 61, 51, 31, 21, 16]1529 p1rxq.cpu_cyc = [0, 61, 51, 31, 21, 16]1530 p2rxq.rx_cyc = [0, 35, 45, 65, 75, 80]1531 p2rxq.cpu_cyc = [0, 35, 45, 65, 75, 80]1532 # update pmd load values1533 dataif.update_pmd_load(self.pmd_map)1534 # copy original pmd objects1535 pmd_map = copy.deepcopy(self.pmd_map)1536 # test dryrun1537 n_reb_rxq = type(self).rebalance_dryrun(self.pmd_map)1538 # validate results1539 # 1. all two rxqs be rebalanced.1540 self.assertEqual(n_reb_rxq, 1, "one rxq to be rebalanced")1541 # 2. each pmd is updated.1542 self.assertNotEqual(pmd_map[self.core1_id], pmd1)1543 self.assertNotEqual(pmd_map[self.core2_id], pmd2)1544 # 3. check rxq map after dryrun.1545 # 3.a rxqp2 remains in pmd11546 self.assertEqual(port2.rxq_rebalanced, {})1547 self.assertEqual(port2.find_rxq_by_id(0).pmd.id, pmd1.id)1548 # 3.a rxqp1 moves into pmd21549 self.assertEqual(port1.rxq_rebalanced[0], pmd2.id)1550 # 4. 
check pmd load1551 self.assertEqual(dataif.pmd_load(pmd1), 60.0)1552 self.assertEqual(dataif.pmd_load(pmd2), 36.0)1553 # del port object from pmd.1554 # TODO: create fx_ post deletion routine for clean up1555 pmd1.del_port('virtport1')...

Full Screen

Full Screen

test_pmd_load_variance.py

Source:test_pmd_load_variance.py Github

copy

Full Screen

...72 dataif.update_pmd_load(self.pmd_map)73 variance_value = dataif.pmd_load_variance(self.pmd_map)74 self.assertEqual(variance_value, 0)75 # del port object from pmd.76 pmd.del_port(port_name)77 # Test case:78 # With one pmd thread handling few one single-queue ports79 def test_many_ports(self):80 # retrieve pmd object.81 pmd = self.pmd_map[self.core_id]82 # one few dummy ports required for this test.83 for port_name in ('virtport1', 'virtport2', 'virtport3'):84 # create port class of name 'virtport'.85 dataif.make_dataif_port(port_name)86 # add port object into pmd.87 fx_port = pmd.add_port(port_name)88 fx_port.numa_id = pmd.numa_id89 # add a dummy rxq into port.90 fx_rxq = fx_port.add_rxq(0)91 # add some cpu consumption for this rxq.92 for i in range(0, config.ncd_samples_max):93 fx_rxq.cpu_cyc[i] = (1000 + (100 * i))94 dataif.update_pmd_load(self.pmd_map)95 variance_value = dataif.pmd_load_variance(self.pmd_map)96 self.assertEqual(variance_value, 0)97 for port_name in ('virtport1', 'virtport2', 'virtport3'):98 pmd.del_port(port_name)99# Fixture:100# create two pmd thread objects where in, each has one single-queued port.101def fx_2pmd_for_1rxq_each(testobj):102 # retrieve pmd object.103 pmd1 = testobj.pmd_map[testobj.core1_id]104 pmd2 = testobj.pmd_map[testobj.core2_id]105 # one dummy port is required for this test.106 port1_name = 'virtport1'107 port2_name = 'virtport2'108 # create port class of name 'virtport'.109 dataif.make_dataif_port(port1_name)110 dataif.make_dataif_port(port2_name)111 # add port object into pmd.112 fx_port1 = pmd1.add_port(port1_name)113 fx_port1.numa_id = pmd1.numa_id114 fx_port2 = pmd2.add_port(port2_name)115 fx_port2.numa_id = pmd2.numa_id116 # add a dummy rxq into port.117 fx_p1rxq = fx_port1.add_rxq(0)118 fx_p1rxq.pmd = pmd1119 fx_p2rxq = fx_port2.add_rxq(0)120 fx_p2rxq.pmd = pmd2121 # add some cpu consumption for these rxqs.122 for i in range(0, config.ncd_samples_max):123 fx_p1rxq.cpu_cyc[i] = (1000 + (100 * i))124 
fx_p2rxq.cpu_cyc[i] = (2000 + (200 * i))125# Fixture:126# Create two pmd thread objects where in, one pmd has two single-queued127# ports, while the other is idle (without any port/rxq).128def fx_1pmd_for_2rxq(testobj):129 # retrieve pmd object.130 pmd1 = testobj.pmd_map[testobj.core1_id]131 # one dummy port is required for this test.132 port1_name = 'virtport1'133 port2_name = 'virtport2'134 # create port class of name 'virtport'.135 dataif.make_dataif_port(port1_name)136 dataif.make_dataif_port(port2_name)137 # add port object into pmd.138 fx_port1 = pmd1.add_port(port1_name)139 fx_port1.numa_id = pmd1.numa_id140 # add second port as well into pmd 1141 fx_port2 = pmd1.add_port(port2_name)142 fx_port2.numa_id = pmd1.numa_id143 # add a dummy rxq into port.144 fx_p1rxq = fx_port1.add_rxq(0)145 fx_p1rxq.pmd = pmd1146 fx_p2rxq = fx_port2.add_rxq(0)147 fx_p2rxq.pmd = pmd1148 # add some cpu consumption for these rxqs.149 for i in range(0, config.ncd_samples_max):150 fx_p1rxq.cpu_cyc[i] = (1000 + (100 * i))151 fx_p2rxq.cpu_cyc[i] = (2000 + (200 * i))152class Test_pmd_load_variance_TwoPmd(TestCase):153 """154 Test variance_value for one or more rxq handled by twp pmds.155 """156 pmd_map = dict()157 core1_id = 0158 core2_id = 1159 # setup test environment160 def setUp(self):161 # turn off limited info shown in assert failure for pmd object.162 self.maxDiff = None163 # a noop handler for debug info log.164 class NlogNoop(object):165 def info(self, *args):166 None167 def debug(self, *args):168 None169 dataif.Context.nlog = NlogNoop()170 # create one pmd object.171 fx_pmd1 = dataif.Dataif_Pmd(self.core1_id)172 fx_pmd2 = dataif.Dataif_Pmd(self.core2_id)173 # let it be in numa 0.174 fx_pmd1.numa_id = 0175 fx_pmd2.numa_id = 0176 # add some cpu consumption for these pmds.177 for i in range(0, config.ncd_samples_max):178 fx_pmd1.idle_cpu_cyc[i] = (1 + (1 * i))179 fx_pmd1.proc_cpu_cyc[i] = (900 + (90 * i))180 fx_pmd1.rx_cyc[i] = (1000 + (100 * i))181 for i in range(0, 
config.ncd_samples_max):182 fx_pmd2.idle_cpu_cyc[i] = (1000 + (100 * i))183 fx_pmd2.proc_cpu_cyc[i] = (9500 + (950 * i))184 fx_pmd2.rx_cyc[i] = (10000 + (1000 * i))185 self.pmd_map[self.core1_id] = fx_pmd1186 self.pmd_map[self.core2_id] = fx_pmd2187 return188 # Test case:189 # With two threads from same numa, each handling only one single-queued190 # port, variance_value191 def test_one_rxq_lnuma(self):192 # set different numa for pmds193 pmd1 = self.pmd_map[self.core1_id]194 pmd2 = self.pmd_map[self.core2_id]195 pmd1.numa_id = 0196 pmd2.numa_id = 0197 fx_2pmd_for_1rxq_each(self)198 dataif.update_pmd_load(self.pmd_map)199 variance_value = dataif.pmd_load_variance(self.pmd_map)200 self.assertEqual(int(variance_value), 17)201 # del port object from pmd.202 # TODO: create fx_ post deletion routine for clean up203 pmd1.del_port('virtport1')204 pmd2.del_port('virtport2')205 # Test case:206 # With two threads from same numa, where one pmd thread is handling207 # two single-queued ports, while the other is doing nothing,208 # check variance_value.209 @mock.patch('netcontrold.lib.util.open')210 def test_two_rxq_lnuma(self, mock_open):211 mock_open.side_effect = [212 mock.mock_open(read_data=_FX_CPU_INFO).return_value213 ]214 # set different numa for pmds215 pmd1 = self.pmd_map[self.core1_id]216 pmd2 = self.pmd_map[self.core2_id]217 pmd1.numa_id = 0218 pmd2.numa_id = 0219 fx_1pmd_for_2rxq(self)220 dataif.update_pmd_load(self.pmd_map)221 variance_value = dataif.pmd_load_variance(self.pmd_map)222 self.assertEqual(int(variance_value), 17)223 # del port object from pmd.224 # TODO: create fx_ post deletion routine for clean up225 pmd1.del_port('virtport1')226 pmd1.del_port('virtport2')227 # Test case:228 # With two threads from different numa, each handling one single-queued229 # port, check variance_value.230 def test_one_rxq_rnuma(self):231 # set different numa for pmds232 pmd1 = self.pmd_map[self.core1_id]233 pmd2 = self.pmd_map[self.core2_id]234 pmd1.numa_id = 0235 
pmd2.numa_id = 1236 fx_2pmd_for_1rxq_each(self)237 dataif.update_pmd_load(self.pmd_map)238 variance_value = dataif.pmd_load_variance(self.pmd_map)239 self.assertEqual(int(variance_value), 17)240 pmd1.del_port('virtport1')...

Full Screen

Full Screen

run.py

Source:run.py Github

copy

Full Screen

# Deluge container entrypoint: configure the deluge account's UID/GID,
# seed the config directory on first run, and launch the web UI.
import os
import subprocess

# Environment variables arrive as strings; coerce the numeric ones to int so
# the comparisons against the integer defaults below behave correctly.
# (os.getenv("DEL_UID") returns e.g. "1000", and "1000" != 1000 is always
# True, which made the usermod/groupmod branches fire even for defaults.)
DEL_UID = int(os.getenv("DEL_UID", 1000))
DEL_GID = int(os.getenv("DEL_GID", 1000))
DEL_PORT = int(os.getenv("DEL_PORT", 58846))
DEL_INT = os.getenv('DEL_INT', 'tun0')

print("Configuring firewall settings")
# Drop the catch-all half-route so traffic is forced through the VPN tunnel.
os.popen("ip route del 128.0.0.0/1")
os.popen("cron &")

# Remap the deluge account to the requested host UID/GID when customised.
if DEL_UID != 1000:
    subprocess.run(["/usr/sbin/usermod", "-u", f"{DEL_UID}", "deluge"])
if DEL_GID != 1000:
    subprocess.run(["/usr/sbin/groupmod", "-g", f"{DEL_GID}", "deluge"])

# First run: create the config tree, seed the default core.conf and hand
# ownership of /config to the deluge account.
if not os.path.exists("/config/.config/deluge"):
    print("Making config directory.")
    os.makedirs("/config/.config/deluge")
    subprocess.run(["cp", "/usr/local/etc/core.conf", "/config/.config/deluge/"])
    subprocess.run(["chown", "-R", f"{DEL_UID}:{DEL_GID}", "/config"])

# Launch the web UI as the deluge user, optionally on a custom port.
webCmd = ["su", "deluge", "-c", "/usr/bin/deluge-web"]
webPort = os.getenv("WEB_PORT", None)
if webPort:
    webCmd.append("-p")
    webCmd.append(webPort)
subprocess.Popen(webCmd)
print("Deluged Init")
# subprocess.run(["/usr/bin/deluged", "--do-not-daemonize", "-U", "deluge", "-g", "deluge", "-o", DEL_INT])

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful