How to use test_4 method in Lemoncheesecake

Best Python code snippet using lemoncheesecake

test_transformations.py

Source:test_transformations.py Github

copy

Full Screen

# Unit tests for the pandas_extras DataFrame transformation helpers:
# expand_list, expand_lists, extract_dict_key, extract_dictionary,
# merge_columns and concatenate_columns.
import unittest

import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal

from pandas_extras import (
    concatenate_columns, expand_list, expand_lists,
    extract_dict_key, extract_dictionary, merge_columns,
)


class TransformationsTestCase(unittest.TestCase):
    """Behavioural tests for the transformation helpers.

    Each test builds a small input frame plus the literal frame expected
    after the transformation, and compares with ``assert_frame_equal``
    (``check_like=True`` tolerates column/row ordering differences).
    """

    def test_expand_list_pos_01(self):
        """expand_list with a single-column index and an explicit target column."""
        df = pd.DataFrame(
            {
                'test_index': [1, 2, 3, 4, 5, 6],
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],          # empty list still yields one row (None)
                    None,        # missing value still yields one row (None)
                ]
            }
        ).set_index('test_index')
        expected = pd.DataFrame(
            {
                'newcol': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
                'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
                'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3],
                'test_index': [1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6]
            }
        )
        assert_frame_equal(expand_list(df, 'samples', 'newcol').reset_index(),
                           expected, check_like=True, check_dtype=False)

    def test_expand_list_pos_02(self):
        """expand_list with a MultiIndex and no target name (expands in place)."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],
                    None,
                ]
            }
        ).set_index(['trial_num', 'subject'])
        expected = pd.DataFrame(
            {
                'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, 2, None],
                'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
                'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
            }
        )
        assert_frame_equal(expand_list(df, 'samples').reset_index(), expected, check_like=True)

    def test_expand_list_pos_03(self):
        """Same as pos_02 but the missing cell is np.NaN rather than None."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],
                    np.NaN,
                ]
            }
        ).set_index(['trial_num', 'subject'])
        expected = pd.DataFrame(
            {
                'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, 2, None],
                'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
                'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
            }
        )
        assert_frame_equal(expand_list(df, 'samples').reset_index(), expected, check_like=True)

    def test_expand_lists_pos_01(self):
        """expand_lists over two equal-length list columns with new names."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],
                    None,
                ],
                'samples2': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'newcol': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
                'newcol2': [1, 2, 3, 4, 1, 2, 3, 1, 2, 1, None, None],
                'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
                'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
            }
        )
        assert_frame_equal(
            expand_lists(df, ['samples', 'samples2'], ['newcol', 'newcol2']).reset_index().drop('index', axis=1),
            expected,
            check_like=True
        )

    def test_expand_lists_pos_02(self):
        """expand_lists with unequal-length lists: shorter lists pad with None."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1],
                    [1],
                    [],
                    None,
                ],
                'samples2': [
                    [1, 2],
                    [3],
                    [1, 2],
                    [1],
                    [],
                    None,
                ]
            }
        ).set_index(['trial_num', 'subject'])
        expected = pd.DataFrame(
            {
                'samples': [1, 2, 3, 4, 1, 1, 2, 3, None, 1, None, None],
                'samples2': [1, 2, None, None, 1, 3, None, None, None, 1, 2, None],
                'subject': [1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2],
                'trial_num': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]
            }
        )
        assert_frame_equal(expand_lists(df, ['samples', 'samples2']).reset_index(), expected, check_like=True)

    def test_expand_lists_pos_03(self):
        """expand_lists where one column holds dicts — values pass through intact."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [{'testkey': 1}, {'testkey': 2}, {'testkey': 3}, {'testkey': 4}],
                    [{'testkey': 1}, {'testkey': 2}, {'testkey': 3}],
                    [{'testkey': 1}, {'testkey': 2}],
                    [{'testkey': 1}],
                    [],
                    None,
                ],
                'other_samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [1],
                    [],
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'newcol': [{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 4.0},
                           {'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 1.0},
                           {'testkey': 2.0}, {'testkey': 1.0}, None, None],
                'newcol2': [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 1.0, 2.0, 1.0, None, None],
                'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
                'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
            }
        )
        assert_frame_equal(
            expand_lists(df, ['samples', 'other_samples'], ['newcol', 'newcol2']).reset_index(drop=True),
            expected,
            check_like=True
        )

    def test_expand_lists_pos_04(self):
        """expand_lists where the two list columns disagree on emptiness per row."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 3, 1, 2, 3],
                'subject': [1, 1, 1, 2, 2, 2],
                'samples': [
                    [{'testkey': 1}, {'testkey': 2}, {'testkey': 3}, {'testkey': 4}],
                    [{'testkey': 1}, {'testkey': 2}, {'testkey': 3}],
                    [{'testkey': 1}, {'testkey': 2}],
                    [{'testkey': 1}],
                    [],
                    ['this will be NaN, as None is not iterable'],
                ],
                'other_samples': [
                    [1, 2, 3, 4],
                    [1, 2, 3],
                    [1, 2],
                    [],
                    [1],
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'newcol': [{'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 4.0},
                           {'testkey': 1.0}, {'testkey': 2.0}, {'testkey': 3.0}, {'testkey': 1.0},
                           {'testkey': 2.0}, {'testkey': 1.0}, None, None],
                'newcol2': [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 1.0, 2.0, None, 1.0, None],
                'subject': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
                'trial_num': [1, 1, 1, 1, 2, 2, 2, 3, 3, 1, 2, 3]
            }
        )
        assert_frame_equal(
            expand_lists(df, ['samples', 'other_samples'], ['newcol', 'newcol2']).reset_index(drop=True),
            expected,
            check_like=True
        )

    def test_extract_dict_key_pos_01(self):
        """extract_dict_key default naming: new column is '<col>.<key>'."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ],
                'samples.A': [1, 3, 6, None]
            }
        )
        assert_frame_equal(extract_dict_key(df, 'samples', 'A').reset_index(drop=True), expected, check_like=True)

    def test_extract_dict_key_pos_02(self):
        """extract_dict_key with explicit target name; a dict missing the key yields None."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    {'B': 8, 'C': None},
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    {'B': 8, 'C': None},
                ],
                'newcol': [1, 3, 6, None]
            }
        )
        assert_frame_equal(
            extract_dict_key(df, 'samples', 'A', 'newcol').reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dict_key_pos_03(self):
        """extract_dict_key over an all-NaN column produces an all-NaN target column."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [np.NaN, np.NaN, np.NaN, np.NaN]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [np.NaN, np.NaN, np.NaN, np.NaN],
                'newcol': [np.NaN, np.NaN, np.NaN, np.NaN]
            }
        )
        assert_frame_equal(
            extract_dict_key(df, 'samples', 'A', 'newcol').reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dict_key_pos_04(self):
        """A missing source column raises KeyError."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
            }
        )
        with self.assertRaises(KeyError):
            extract_dict_key(df, 'samples', 'A', 'newcol')

    def test_extract_dict_key_pos_05(self):
        """An empty frame still gains the target column."""
        df = pd.DataFrame(
            columns=('trial_num', 'subject', 'samples')
        )
        self.assertIn('newcol', extract_dict_key(df, 'samples', 'A', 'newcol').columns.to_list())

    def test_extract_dictionary_pos_01(self):
        """extract_dictionary with an explicit key list; source column is dropped."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples.A': [1, 3, 6, None],
                'samples.B': [2, 4, 7, None],
            }
        )
        assert_frame_equal(
            extract_dictionary(df, 'samples', ['A', 'B']).reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dictionary_pos_02(self):
        """extract_dictionary with a custom prefix for the new columns."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'newcol.A': [1, 3, 6, None],
                'newcol.B': [2, 4, 7, None],
            }
        )
        assert_frame_equal(
            extract_dictionary(df, 'samples', ['A', 'B'], 'newcol').reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dictionary_pos_03(self):
        """extract_dictionary without a key list extracts every key found."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples.A': [1, 3, 6, None],
                'samples.B': [2, 4, 7, None],
                'samples.C': [None, 5, None, None]
            }
        )
        assert_frame_equal(extract_dictionary(df, 'samples').reset_index(drop=True), expected, check_like=True)

    def test_extract_dictionary_pos_04(self):
        """An empty prefix names the new columns after the keys alone."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [
                    {'A': 1, 'B': 2, 'C': None},
                    {'A': 3, 'B': 4, 'C': 5},
                    {'A': 6, 'B': 7, 'C': None},
                    None,
                ]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'A': [1, 3, 6, None],
                'B': [2, 4, 7, None],
                'C': [None, 5, None, None]
            }
        )
        assert_frame_equal(
            extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dictionary_pos_05(self):
        """An all-None dict column simply disappears (no keys to extract)."""
        df = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2],
                'samples': [None, None, None, None]
            }
        )
        expected = pd.DataFrame(
            {
                'trial_num': [1, 2, 1, 2],
                'subject': [1, 1, 2, 2]
            }
        )
        assert_frame_equal(
            extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
            expected, check_like=True
        )

    def test_extract_dictionary_pos_06(self):
        """Keys are still discovered when the FIRST row is None."""
        df = pd.DataFrame({
            'trial_num': [1, 2, 1, 2],
            'subject': [1, 1, 2, 2],
            'samples': [
                None,
                {'A': 1, 'B': 2, 'C': None},
                {'A': 3, 'B': 4, 'C': 5},
                {'A': 6, 'B': 7, 'C': None},
            ]
        })
        expected = pd.DataFrame({
            'trial_num': [1, 2, 1, 2],
            'subject': [1, 1, 2, 2],
            'A': [None, 1, 3, 6],
            'B': [None, 2, 4, 7],
            'C': [None, None, 5, None]
        })
        assert_frame_equal(
            extract_dictionary(df, 'samples', prefix='').reset_index(drop=True),
            expected, check_like=True
        )

    def test_merge_columns(self):
        """merge_columns keep='first'/'last' picks the first/last non-null value;
        mutates the frame in place; invalid `keep` or aggr+keep combo raises."""
        dataframe = pd.DataFrame([
            {
                'test_1': pd.NaT,
                'test_2': [],
                'test_3': 'TEST',
                'test_4': 'TEST2'
            },
            {
                'test_1': 'TEST3',
                'test_2': ['TEST'],
                'test_3': 'TEST',
                'test_4': 'TEST2'
            },
            {
                'test_1': np.NaN,
                'test_2': None,
                'test_3': 'TEST5',
                'test_4': 'TEST6'
            }
        ])
        expected_result_first = pd.DataFrame([
            {
                'test_1': None,
                'test_2': [],
                'test_3': 'TEST',
                'test_4': 'TEST2',
                'new_col_name': 'TEST'
            },
            {
                'test_1': 'TEST3',
                'test_2': ['TEST'],
                'test_3': 'TEST',
                'test_4': 'TEST2',
                'new_col_name': 'TEST3'
            },
            {
                'test_1': None,
                'test_2': None,
                'test_3': 'TEST5',
                'test_4': 'TEST6',
                'new_col_name': 'TEST5'
            }
        ])
        expected_result_last = pd.DataFrame([
            {
                'test_1': None,
                'test_2': [],
                'test_3': 'TEST',
                'test_4': 'TEST2',
                'new_col_name': 'TEST2'
            },
            {
                'test_1': 'TEST3',
                'test_2': ['TEST'],
                'test_3': 'TEST',
                'test_4': 'TEST2',
                'new_col_name': 'TEST2'
            },
            {
                'test_1': None,
                'test_2': None,
                'test_3': 'TEST5',
                'test_4': 'TEST6',
                'new_col_name': 'TEST6'
            }
        ])
        merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='first')
        assert_frame_equal(dataframe, expected_result_first, check_like=True)
        merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='last')
        assert_frame_equal(dataframe, expected_result_last, check_like=True)
        with self.assertRaises(ValueError):
            merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', keep='something_wrong')
        with self.assertRaises(ValueError):
            merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', aggr=sum, keep='first')

    def test_merge_columns_aggr(self):
        """merge_columns with a callable aggregator; a non-callable aggr raises."""
        dataframe = pd.DataFrame([
            {
                'test_1': 1,
                'test_2': [],
                'test_3': 5,
                'test_4': 9
            },
            {
                'test_1': 0,
                'test_2': ['TEST'],
                'test_3': 9,
                'test_4': 7
            },
            {
                'test_1': 1,
                'test_2': None,
                'test_3': 8,
                'test_4': 1
            }
        ])
        expected_result = pd.DataFrame([
            {
                'test_1': 1,
                'test_2': [],
                'test_3': 5,
                'test_4': 9,
                'new_col_name': 15
            },
            {
                'test_1': 0,
                'test_2': ['TEST'],
                'test_3': 9,
                'test_4': 7,
                'new_col_name': 16
            },
            {
                'test_1': 1,
                'test_2': None,
                'test_3': 8,
                'test_4': 1,
                'new_col_name': 10
            }
        ])
        merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', aggr=sum)
        assert_frame_equal(dataframe, expected_result, check_like=True, check_dtype=False)
        with self.assertRaises(ValueError):
            merge_columns(dataframe, ['test_1', 'test_3', 'test_4'], 'new_col_name', aggr='sum')

    def test_concatenate_columns_pos_01(self):
        """concatenate_columns stacks two columns into one, duplicating the index."""
        dataframe = pd.DataFrame([
            {'key': 'TICKET-1', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-2', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-3', 'assignee': 'Bob', 'reporter': 'Alice'},
        ]).set_index('key')
        expected = pd.DataFrame([
            {'key': 'TICKET-1', 'user': 'Bob'},
            {'key': 'TICKET-1', 'user': 'Alice'},
            {'key': 'TICKET-2', 'user': 'Bob'},
            {'key': 'TICKET-2', 'user': 'Alice'},
            {'key': 'TICKET-3', 'user': 'Bob'},
            {'key': 'TICKET-3', 'user': 'Alice'},
        ]).set_index('key')
        assert_frame_equal(concatenate_columns(dataframe, ['assignee', 'reporter'], 'user'), expected)

    def test_concatenate_columns_pos_02(self):
        """With descriptor= the source column name is recorded alongside each value."""
        dataframe = pd.DataFrame([
            {'key': 'TICKET-1', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-2', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-3', 'assignee': 'Bob', 'reporter': 'Alice'},
        ]).set_index('key')
        expected = pd.DataFrame([
            {'key': 'TICKET-1', 'user': 'Bob', 'role': 'assignee'},
            {'key': 'TICKET-1', 'user': 'Alice', 'role': 'reporter'},
            {'key': 'TICKET-2', 'user': 'Bob', 'role': 'assignee'},
            {'key': 'TICKET-2', 'user': 'Alice', 'role': 'reporter'},
            {'key': 'TICKET-3', 'user': 'Bob', 'role': 'assignee'},
            {'key': 'TICKET-3', 'user': 'Alice', 'role': 'reporter'},
        ]).set_index('key')[['user', 'role']]
        assert_frame_equal(
            concatenate_columns(dataframe, ['assignee', 'reporter'], 'user', descriptor='role'),
            expected
        )

    def test_concatenate_columns_pos_03(self):
        """mapper= translates source column names in the descriptor column."""
        dataframe = pd.DataFrame([
            {'key': 'TICKET-1', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-2', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-3', 'assignee': 'Bob', 'reporter': 'Alice'},
        ]).set_index('key')
        expected = pd.DataFrame([
            {'key': 'TICKET-1', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-1', 'user': 'Alice', 'role': 'r'},
            {'key': 'TICKET-2', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-2', 'user': 'Alice', 'role': 'r'},
            {'key': 'TICKET-3', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-3', 'user': 'Alice', 'role': 'r'},
        ]).set_index('key')[['user', 'role']]
        mapper = {'assignee': 'a', 'reporter': 'r'}
        assert_frame_equal(
            concatenate_columns(dataframe, ['assignee', 'reporter'], 'user', descriptor='role', mapper=mapper),
            expected
        )

    def test_concatenate_columns_non_existent_col(self):
        """A listed column that does not exist ('creator') is silently ignored."""
        dataframe = pd.DataFrame([
            {'key': 'TICKET-1', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-2', 'assignee': 'Bob', 'reporter': 'Alice'},
            {'key': 'TICKET-3', 'assignee': 'Bob', 'reporter': 'Alice'},
        ]).set_index('key')
        expected = pd.DataFrame([
            {'key': 'TICKET-1', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-1', 'user': 'Alice', 'role': 'r'},
            {'key': 'TICKET-2', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-2', 'user': 'Alice', 'role': 'r'},
            {'key': 'TICKET-3', 'user': 'Bob', 'role': 'a'},
            {'key': 'TICKET-3', 'user': 'Alice', 'role': 'r'},
        ]).set_index('key')[['user', 'role']]
        mapper = {'assignee': 'a', 'reporter': 'r'}
        assert_frame_equal(
            concatenate_columns(dataframe, ['assignee', 'reporter', 'creator'], 'user', descriptor='role', mapper=mapper),
            expected
        )


# NOTE(review): the scraped source is truncated here; the runner body
# (presumably unittest.main()) is not visible.
if __name__ == '__main__': ...

Full Screen

Full Screen

HCHEdistmods.py

Source:HCHEdistmods.py Github

copy

Full Screen

# -*- coding: utf-8 -*-
"""
R744 distributor selection GUI.

Reads the Sporlan distributor tables from 'Distributor table.xlsx' and,
from the evap temperature, capacity, liquid temperature, circuit count and
tail length entered in the window, suggests matching distributor models.

Created on Mon Dec 4 08:47:49 2017

@author: MatthewLazenby
"""
import tkinter as tk
from tkinter import messagebox

import numpy as np
import pandas as pd

dist = tk.Tk()
dist.title("R744 Distributors")

lbl_evap_t = tk.Label(dist, text="Evap Temperature(°C):", width=20)
lbl_cap = tk.Label(dist, text="Capacity(kW):", width=20)
lbl_liq_t = tk.Label(dist, text="Liquid Temperature(°C):", width=20)
lbl_circ = tk.Label(dist, text="Number of Circuits:", width=20)
lbl_tail_len = tk.Label(dist, text="Tail Length(mm):", width=20)
txtbx_cap = tk.Entry(dist, width=20)
txtbx_evap_t = tk.Entry(dist, width=20)
Liqs = ['-20°C', '-15°C', '-10°C', '-5°C', '0°C', '5°C']
liq_temps = tk.StringVar(dist)
liq_temps.set(Liqs[5])
txtbx_liq_t = tk.OptionMenu(dist, liq_temps, *Liqs)
Tubes = [300, 450, 600, 760, 900, 1050, 1200, 1350, 1500, 1650, 1800]
tube_lens = tk.StringVar(dist)
tube_lens.set(Tubes[0])
txtbx_circ = tk.Entry(dist, width=20)
txtbx_tail_len = tk.OptionMenu(dist, tube_lens, *Tubes)

lbl_evap_t.grid(row=0, column=0, sticky=tk.E)
txtbx_evap_t.grid(row=0, column=1)
lbl_cap.grid(row=1, column=0, sticky=tk.E)
txtbx_cap.grid(row=1, column=1)
lbl_liq_t.grid(row=2, column=0, sticky=tk.E)
txtbx_liq_t.grid(row=2, column=1)
lbl_circ.grid(row=3, column=0, sticky=tk.E)
txtbx_circ.grid(row=3, column=1)
lbl_tail_len.grid(row=4, column=0, sticky=tk.E)
txtbx_tail_len.grid(row=4, column=1)

# Characters never valid in a numeric entry field.
# BUGFIX: the original dist_nozz_caps() blacklist also contained '-', which
# rejected every negative evap temperature; '-' is deliberately excluded
# here, consistent with dist_tube_caps() and calc_dist().
# (Raw string: the original "...[]\|`~" relied on the invalid '\|' escape.)
_SPECIAL_CHARS = frozenset(r"!@#$%^&*()_+=<>,?/':;{}[]\|`~")


def _has_special(text):
    """True if *text* contains a blacklisted character (set intersection
    replaces the original O(len(text) * len(blacklist)) nested loops)."""
    return bool(_SPECIAL_CHARS & set(text))


def _temp_column(evap_t):
    """Map an evap temperature (°C) to the table column it is read from."""
    if evap_t >= -25:
        return '-20°C'
    elif evap_t >= -35:
        return '-30°C'
    return '-40°C'


def dist_nozz_caps():
    """Return the [nozzle number, capacity] rows for the entered evap
    temperature, or None (after an error popup) if the entry is not numeric."""
    text = txtbx_evap_t.get()
    if _has_special(text):
        messagebox.showerror("Error", "Be sure to enter the values in correctly.")
        return None
    if any(c.isalpha() for c in text):
        messagebox.showerror("Error", "Enter in a valid value for Evap Temperature.")
        return None
    noz_cap = pd.read_excel('Distributor table.xlsx',
                            sheet_name='Distributor Nozzle Capacities')
    return np.array(noz_cap[['Nozzle number:', _temp_column(float(text))]])


def dist_tube_caps():
    """Return the [tube diameter, capacity] rows for the entered evap
    temperature, or None (after an error popup) if the entry is not numeric."""
    text = txtbx_evap_t.get()
    if _has_special(text):
        messagebox.showerror("Error", "Be sure to enter the values in correctly.")
        return None
    if any(c.isalpha() for c in text):
        messagebox.showerror("Error", "Enter in a valid value for Evap Temperature.")
        return None
    tube_cap = pd.read_excel('Distributor table.xlsx',
                             sheet_name='Distributor Capacity per tube')
    return np.array(tube_cap[['Tube Diameter:', _temp_column(float(text))]])


def calc_dist():
    """Size a distributor from the GUI entries and show the suggestions.

    Returns (models, nozzle capacity, tube diameter, circuits) on success;
    on any validation or table-range failure shows an error dialog and
    returns None.
    """
    test_1 = txtbx_evap_t.get()
    test_2 = txtbx_cap.get()
    test_3 = txtbx_circ.get()
    if (any(_has_special(t) or any(c.isalpha() for c in t)
            for t in (test_1, test_2, test_3))
            or "" in (test_1, test_2, test_3)):
        messagebox.showerror("Error", "Be sure to enter the values in correctly.")
        return None

    capacity = float(test_2)
    liq_t = str(liq_temps.get())
    circ = int(test_3)
    tail_len = float(tube_lens.get())

    liq_temp = pd.read_excel('Distributor table.xlsx', sheet_name='Liquid Temperature')
    liq_cf = np.array(liq_temp[liq_t])[0]
    nozzy = capacity / liq_cf

    # PERF: the original re-called dist_nozz_caps()/dist_tube_caps() — and
    # hence pd.read_excel — inside every loop iteration; read each table once.
    nozz = dist_nozz_caps()
    if not (float(nozz[0][1]) <= nozzy <= float(nozz[-1][1])):
        messagebox.showerror("Error", "The nozzle size could not be calculated "
                                      "based on the values in the Sporlan tables.")
        return None
    noz_capacity = None
    for row, nxt in zip(nozz, nozz[1:]):
        if row[1] < nozzy < nxt[1]:
            noz_capacity = nxt[0]
        elif row[1] == nozzy:
            noz_capacity = row[0]
        elif nxt[1] == nozzy:
            noz_capacity = nxt[0]

    tail_table = pd.read_excel('Distributor table.xlsx', sheet_name='Tubelength')
    tail_cf = np.array(tail_table[tail_len])[0]
    tube_cap = ((capacity / circ) / tail_cf) / liq_cf

    tubes = dist_tube_caps()
    tube_di = None
    if float(tubes[0][1]) <= tube_cap <= float(tubes[-1][1]):
        for row, nxt in zip(tubes, tubes[1:]):
            # As in the original: an exact match also selects the NEXT row.
            if row[1] < tube_cap < nxt[1] or row[1] == tube_cap or nxt[1] == tube_cap:
                tube_di = nxt[0]
    elif tube_cap < float(tubes[0][1]):
        tube_di = tubes[0][0]
    else:
        # BUGFIX: the original showed this error but then fell through to the
        # HCHE lookup with `tube_di` unbound, raising UnboundLocalError.
        messagebox.showerror("Error", "The tube diameter could not be calculated "
                                      "based on the values in the Sporlan tables.")
        return None

    if tube_di not in ("3/16", "1/4", "5/16", "3/8"):
        # BUGFIX: same unbound-variable fall-through guarded here as well.
        messagebox.showerror("Error", "The tube diameter could not be calculated "
                                      "based on the values in the Sporlan tables.")
        return None

    hche = pd.read_excel('Distributor table.xlsx', sheet_name='HCHE')
    mod = np.array(hche[['Model:', tube_di, 'Nozzle size:', 'Conn. Size OD:', 'Body size ']])
    dist_mod = []
    for chk in mod:
        circuits = chk[1]  # circuit range served by this model, e.g. "2-8"
        if str(circuits) != 'nan':
            first, sep, last = str(circuits).partition("-")
            if sep and int(first) <= int(circ) <= int(last):
                dist_mod.append(chk[0])

    if dist_mod == []:
        messagebox.showerror("Error", "No distributor was calculted.")
    else:
        display = dist_mod, circ, tube_di, noz_capacity
        msg = "Possible distributor models:\n%a -%a -%a -%a" % display
        messagebox.showinfo("Suggested Distributors", msg)

    return dist_mod, noz_capacity, tube_di, int(circ)


button_dist = tk.Button(dist, text="Calculate Distributor", command=calc_dist)
button_dist.grid(row=5, column=0, columnspan=6)
button_dist.focus_set()

Full Screen

Full Screen

config.py

Source:config.py Github

copy

Full Screen

################################################################################
# This file is a part of TADEK.
#
# TADEK - Test Automation in a Distributed Environment
# (http://tadek.comarch.com)
#
# Copyright (C) 2011 Comarch S.A.
# All rights reserved.
#
# TADEK is free software for non-commercial purposes. For commercial ones
# we offer a commercial license. Please check http://tadek.comarch.com for
# details or write to tadek-licenses@comarch.com
#
# You can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# TADEK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TADEK bundled with this file in the file LICENSE.
# If not, see http://www.gnu.org/licenses/.
#
# Please notice that Contributor Agreement applies to any contribution
# you make to TADEK. The Agreement must be completed, signed and sent
# to Comarch before any contribution is made. You should have received
# a copy of Contribution Agreement along with TADEK bundled with this file
# in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com
# or write to tadek-licenses@comarch.com
################################################################################
# NOTE(review): this module is Python 2 code (`import ConfigParser`,
# `failUnlessEqual`); it will not run unmodified under Python 3.
import unittest
import ConfigParser
import os

from tadek.core import config

__all__ = ["ConfigTest"]

_PROGRAM_NAME = 'unittest'


class ConfigTest(unittest.TestCase):
    """Tests for the tadek.core.config read/write API.

    Works on two scratch config files under the user configuration
    directory; an independent ConfigParser (`_configCheck`) verifies on
    disk what `config.*` wrote.
    """

    # Base names of the scratch config files (extension added in setUp).
    _filenames = ('__testCaseConfig0__', '__testCaseConfig1__')
    # Out-of-band parser used to inspect the files written by config.*.
    _configCheck = ConfigParser.ConfigParser()
    _populated = False

    def _parserReload(self):
        """Re-read the scratch files with a fresh parser.

        Returns the list of files successfully parsed.
        """
        self._configCheck = ConfigParser.ConfigParser()
        return self._configCheck.read((self._files), )

    def _setUpTestFiles(self):
        """Populate the second scratch file with sections Test_2..Test_4
        and register it with the config module."""
        try:
            os.mkdir(os.path.dirname(self._files[1]))
        except OSError:
            # Directory already exists — nothing to do.
            pass
        test_file = open(self._files[1], 'w')
        configWriter = ConfigParser.ConfigParser()
        configWriter.add_section('Test_2')
        configWriter.set('Test_2', 'test2_1', '2')
        configWriter.set('Test_2', 'test2_2', '22')
        configWriter.set('Test_2', 'test2_3', 'True')
        configWriter.add_section('Test_3')
        configWriter.set('Test_3', 'test3_1', '3')
        configWriter.set('Test_3', 'test3_2', '33')
        configWriter.add_section('Test_4')
        configWriter.set('Test_4', 'test4_1', '4')
        configWriter.set('Test_4', 'test4_2', '44')
        configWriter.set('Test_4', 'test4_3', 'True')
        configWriter.set('Test_4', 'test4_4', 'Test')
        configWriter.write(test_file)
        test_file.close()
        # Re-open read-only and hand the file object to the config module.
        test_file = open(self._files[1], 'r')
        config.update(self._filenames[1], test_file)
        test_file.close()

    def setUp(self):
        """Compute the scratch file paths and remove any leftovers."""
        files = []
        for file in self._filenames:
            files.append(os.path.join(config._USER_CONF_DIR, _PROGRAM_NAME,
                                      ''.join((file, config._CONF_FILE_EXT))))
        self._files = tuple(files)
        for file in files:
            try:
                os.remove(file)
            except OSError:
                # File did not exist — fine.
                pass

    def tearDown(self):
        """Remove the scratch files created by a test."""
        for file_ in self._files:
            try:
                os.remove(file_)
            except OSError:
                pass

    def testGetProgramName(self):
        """config.getProgramName() reflects the running program's name."""
        self.assertEqual(_PROGRAM_NAME, config.getProgramName())

    def testFileCreation(self):
        """config.set() with only a file name creates the file on disk."""
        config.set(self._filenames[0])
        files = self._parserReload()
        self.assertTrue(self._files[0] in files)

    def testValueAddToNewFile(self):
        """config.set() creates sections/options; a value-less option is ''."""
        config.set(self._filenames[0], 'Test_0')
        config.set(self._filenames[0], 'Test_1', 'test1_1', 'True')
        config.set(self._filenames[0], 'Test_1', 'test1_2')
        self._parserReload()
        self.assertTrue(self._configCheck.has_section('Test_0'))
        self.assertTrue(self._configCheck.has_section('Test_1'))
        self.assertTrue(self._configCheck.get('Test_1', 'test1_1'))
        self.assertEqual('', self._configCheck.get('Test_1', 'test1_2'))

    def testValueAddExistingSection(self):
        """config.set() can add an option to an already populated file."""
        self._setUpTestFiles()
        config.set(self._filenames[1], 'Test_5', 'test5_1', '5')
        self._parserReload()
        self.assertEqual('5', self._configCheck.get('Test_5', 'test5_1'))

    def testValueGet(self):
        """config.get() lists files, sections, options and single values."""
        self._setUpTestFiles()
        self.assertTrue(self._filenames[1] in config.get())
        self.assertTrue('Test_4' in config.get(self._filenames[1]))
        self.assertEqual(['test4_1', 'test4_2', 'test4_3', 'test4_4'],
                         sorted(config.get(self._filenames[1], 'Test_4')))
        self.assertEqual('44', config.get(self._filenames[1], 'Test_4',
                                          'test4_2'))

    def testValueRemove(self):
        """config.remove() deletes an option, a section, then the file."""
        self._setUpTestFiles()
        self._parserReload()
        self.assertEqual('4', self._configCheck.get('Test_4', 'test4_1'))
        config.remove(self._filenames[1], 'Test_4', 'test4_1')
        self._parserReload()
        self.assertRaises(ConfigParser.NoOptionError,
                          self._configCheck.get, 'Test_4', 'test4_1')
        self.assertTrue('Test_4' in self._configCheck.sections())
        config.remove(self._filenames[1], 'Test_4')
        self.assertTrue(self._files[1] in self._parserReload())
        self.assertFalse('Test_4' in self._configCheck.sections())
        config.remove(self._filenames[1])
        self.assertFalse(self._files[1] in self._parserReload())

    def testGetBool(self):
        """config.getBool() parses 'True'; a non-boolean string yields None."""
        self._setUpTestFiles()
        self.assertEqual(True, config.getBool(self._filenames[1],
                                              'Test_4', 'test4_3'))
        self.assertEqual(None, config.getBool(self._filenames[1],
                                              'Test_4', 'test4_4'))

    def testListValue(self):
        """A list value round-trips through config.set()/getList()."""
        self._setUpTestFiles()
        value = ["item1", "item2", "item3"]
        config.set(self._filenames[1], "Test_5", "test5_1", value)
        # NOTE(review): failUnlessEqual is the deprecated alias of assertEqual.
        self.failUnlessEqual(value, config.getList(self._filenames[1],
                                                   "Test_5", "test5_1"))

    def testTupleValue(self):
        """A mixed-type tuple comes back as a list of strings."""
        self._setUpTestFiles()
        value = (1, "item2", False)
        config.set(self._filenames[1], "Test_5", "test5_2", value)
        self.failUnlessEqual(["1", "item2", "False"],
                             config.getList(self._filenames[1], "Test_5", "test5_2"))


# NOTE(review): the scraped source is truncated here; the runner body is
# not visible.
if __name__ == "__main__": ...

Full Screen

Full Screen

Opengauss_Function_DML_Upsert_Case0118.py

Source:Opengauss_Function_DML_Upsert_Case0118.py Github

copy

Full Screen

...27 logger.info('------------------------Opengauss_Function_DML_Upsert_Case0118开始执行-----------------------------')28 def test_sysadmin_user_permission(self):29 # 建表指定id列为唯一约束且name列为数组类型30 sql_cmd1 = commonsh.execut_db_sql(''' drop table if exists test_4;31 create table test_4(name char[] ,id int unique ,address nvarchar2(50)) ;''')32 logger.info(sql_cmd1)33 self.assertIn(constant.CREATE_TABLE_SUCCESS, sql_cmd1)34 # 常规insert插入一条数据35 # 使用insert..update..EXCLUDED语句,原数据(array['c','d','a'],3,'tianjin1')更改为(array['c','d','a'],3,YUNNAN)并新增一条数据(array['c','d'],4,'dalian1')36 # 使用insert..update..EXCLUDED语句,两条数据主键均重复,更改后的数据为(array['c','d','e'],3,'YUNNAN1')和(array['c','d','g'],4,'DAQING')37 sql_cmd2 = commonsh.execut_db_sql('''insert into test_4 values(array['c','d','a'],3,'tianjin1');38 explain analyse insert into test_4 values(array['c','d','e'],3,'yunnan'),(array['c','d'],4,'dalian1') ON duplicate key update address=upper(EXCLUDED.address);39 explain analyze insert into test_4 values(array['c','d','e'],3,'yunnan1'),(array['c','d','g'],4,'daqing') ON duplicate key update address=upper(EXCLUDED.address),name=EXCLUDED.name;''')40 logger.info(sql_cmd2)41 self.assertIn(constant.INSERT_SUCCESS_MSG, sql_cmd2)42 # 清理表数据43 sql_cmd3 = commonsh.execut_db_sql('''truncate test_4;''')44 logger.info(sql_cmd3)45 self.assertIn(constant.TRUNCATE_SUCCESS_MSG, sql_cmd3)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful