How to use the test_normal method in green

Best Python code snippets using green
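green is a test runner built on Python's unittest: it discovers unittest-style test cases and runs them with clean, colorful output. In the snippets below, test_normal is simply the conventional name each suite gives its "happy path" test method. As a minimal sketch (the module and class names here are invented, not taken from the snippets below), a test_normal method that green can discover and run looks like this:

# test_example.py - a minimal, hypothetical test_normal method
import unittest

class TestAddition(unittest.TestCase):
    def test_normal(self):
        # The "normal" (happy-path) case: typical input, no errors expected.
        self.assertEqual(1 + 1, 2)

if __name__ == '__main__':
    unittest.main()

Running green -vvv in the directory containing test_example.py discovers the class and reports the result of test_normal; the same file also runs under plain unittest.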

preprocess-ten-folds.py

Source: preprocess-ten-folds.py (GitHub)


#!/usr/bin/python
# encoding: utf-8
'''
Created on May 26, 2016
@author: yanruibo
'''
# NOTE: this script is Python 2 ("print" statements; "/" on ints is floor division).
import os
import random
from ast import MultiTree  # local "ast" module in this project, not the stdlib one
import numpy as np
import json


def serialize_list(list_content, filename):
    fw = open(filename, 'w')
    for item in list_content:
        fw.write(item + "\n")
    fw.flush()
    fw.close()


def get_list_from_file(filename):
    fr = open(filename, 'r')
    lines = fr.readlines()
    lines = [line.strip() for line in lines]
    fr.close()
    ret_lines = []
    for line in lines:
        if(line != ""):
            ret_lines.append(line)
    return ret_lines


def generate_dataset():
    normal_feature_vector_dir = "./all-feature-vector/normal/"
    normal_packagenames = os.listdir(normal_feature_vector_dir)
    vulnerable_feature_vector_dir = "./all-feature-vector/vulnerable/"
    vulnerable_packagenames = os.listdir(vulnerable_feature_vector_dir)

    train_normal = random.sample(normal_packagenames, len(normal_packagenames) / 2)
    test_normal = list(set(normal_packagenames) - set(train_normal))
    train_vulnerable = random.sample(vulnerable_packagenames, len(vulnerable_packagenames) / 2)
    test_vulnerable = list(set(vulnerable_packagenames) - set(train_vulnerable))

    serialize_list(train_normal, "./dataset/train_normal")
    serialize_list(test_normal, "./dataset/test_normal")
    serialize_list(train_vulnerable, "./dataset/train_vulnerable")
    serialize_list(test_vulnerable, "./dataset/test_vulnerable")


def generate_dataset_ten_fold():
    normal_feature_vector_dir = "./all-feature-vector/normal/"
    normal_packagenames = os.listdir(normal_feature_vector_dir)
    vulnerable_feature_vector_dir = "./all-feature-vector/vulnerable/"
    vulnerable_packagenames = os.listdir(vulnerable_feature_vector_dir)
    normal_ten_percent = len(normal_packagenames) / 10
    vulnerable_ten_percent = len(vulnerable_packagenames) / 10
    normal_begin_pos = 0
    vulnerable_begin_pos = 0
    for i in range(10):
        if i == 9:
            test_normal = normal_packagenames[normal_begin_pos + i * normal_ten_percent:]
            train_normal = list(set(normal_packagenames) - set(test_normal))
            test_vulnerable = vulnerable_packagenames[vulnerable_begin_pos + i * vulnerable_ten_percent:]
            train_vulnerable = list(set(vulnerable_packagenames) - set(test_vulnerable))
            serialize_list(train_normal, "./dataset/train_normal" + str(i))
            serialize_list(test_normal, "./dataset/test_normal" + str(i))
            serialize_list(train_vulnerable, "./dataset/train_vulnerable" + str(i))
            serialize_list(test_vulnerable, "./dataset/test_vulnerable" + str(i))
        else:
            test_normal = normal_packagenames[normal_begin_pos + i * normal_ten_percent:normal_begin_pos + (i + 1) * normal_ten_percent]
            train_normal = list(set(normal_packagenames) - set(test_normal))
            test_vulnerable = vulnerable_packagenames[vulnerable_begin_pos + i * vulnerable_ten_percent:vulnerable_begin_pos + (i + 1) * vulnerable_ten_percent]
            train_vulnerable = list(set(vulnerable_packagenames) - set(test_vulnerable))
            serialize_list(train_normal, "./dataset/train_normal" + str(i))
            serialize_list(test_normal, "./dataset/test_normal" + str(i))
            serialize_list(train_vulnerable, "./dataset/train_vulnerable" + str(i))
            serialize_list(test_vulnerable, "./dataset/test_vulnerable" + str(i))


def feature_select(i, method, k=400):

    features = get_list_from_file("./all_ngrams/all_ngrams")
    train_normal = get_list_from_file("./dataset/train_normal" + str(i))
    # test_normal = get_list_from_file("./dataset/test_normal"+str(i))
    train_vulnerable = get_list_from_file("./dataset/train_vulnerable" + str(i))
    # test_vulnerable = get_list_from_file("./dataset/test_vulnerable"+str(i))
    normal_feature_vector_dir = "./all-feature-vector/normal/"
    vulnerable_feature_vector_dir = "./all-feature-vector/vulnerable/"

    trainX = []
    trainY = []
    for packagename in train_normal:
        one_vector = np.loadtxt(fname=normal_feature_vector_dir + packagename, dtype=np.int)
        trainX.append(one_vector)
        trainY.append(0)
    for packagename in train_vulnerable:
        one_vector = np.loadtxt(fname=vulnerable_feature_vector_dir + packagename, dtype=np.int)
        trainX.append(one_vector)
        trainY.append(1)
    trainX = np.array(trainX)
    trainY = np.array(trainY)
    dataSet = np.c_[trainX, trainY]

    if(method == "infogain"):
        from TextAnalysisInfoGain2 import IG
        dataSet = dataSet.tolist()
        best_feature_indexes, selected_data = IG(dataSet, k)
        best_features = [features[index] for index in best_feature_indexes]
        serialize_list(best_features, "./selected-features/infogain" + str(i))
        np.savetxt("./selected-features/train_infogain.dat" + str(i), X=selected_data, fmt="%d", delimiter=',', newline='\n')
        return best_features, selected_data
    elif(method == "chi"):
        from TextAnalysisChi import chi
        best_feature_indexes, selected_data = chi(dataSet, k)
        best_features = [features[index] for index in best_feature_indexes]
        serialize_list(best_features, "./selected-features/chi" + str(i))
        np.savetxt("./selected-features/train_chi.dat" + str(i), X=selected_data, fmt="%d", delimiter=',', newline='\n')
        return best_features, selected_data
    elif(method == "df"):
        from TextAnalysisDF import df
        best_feature_indexes, selected_data = df(dataSet, k)
        best_features = [features[index] for index in best_feature_indexes]
        serialize_list(best_features, "./selected-features/df" + str(i))
        np.savetxt("./selected-features/train_df.dat" + str(i), X=selected_data, fmt="%d", delimiter=',', newline='\n')
        return best_features, selected_data


def test_feature_selection_infogain(i):
    '''
    import time
    startTimeStamp = time.time()
    best_features, selected_data = feature_select(i,"infogain")

    endTimeStamp = time.time()
    total_time = endTimeStamp - startTimeStamp
    ft = open("log_time_feature_selection_infogain_" + str(endTimeStamp), "w")
    ft.write("Total Time : " + str(total_time) + "\n")
    ft.close()
    '''
    best_features, selected_data = feature_select(i, "infogain")
    return best_features, selected_data


def test_feature_selection_chi(i):
    '''
    import time
    startTimeStamp = time.time()
    best_features, selected_data = feature_select(i,"chi")

    endTimeStamp = time.time()
    total_time = endTimeStamp - startTimeStamp
    ft = open("log_time_feature_selection_chi_" + str(endTimeStamp), "w")
    ft.write("Total Time : " + str(total_time) + "\n")
    ft.close()
    '''
    best_features, selected_data = feature_select(i, "chi")
    return best_features, selected_data


def test_feature_selection_df(i):
    '''
    import time
    startTimeStamp = time.time()
    best_features, selected_data = feature_select(i,"chi")

    endTimeStamp = time.time()
    total_time = endTimeStamp - startTimeStamp
    ft = open("log_time_feature_selection_chi_" + str(endTimeStamp), "w")
    ft.write("Total Time : " + str(total_time) + "\n")
    ft.close()
    '''
    best_features, selected_data = feature_select(i, "df")
    return best_features, selected_data


# contains feature selection
def generate_matrix_infogain(i):
    # feature selection
    best_features, selected_data = test_feature_selection_infogain(i)
    print "feature selection done!"
# import time
# startTimeStamp = time.time()

    train_normal = get_list_from_file("./dataset/train_normal" + str(i))
    test_normal = get_list_from_file("./dataset/test_normal" + str(i))
    train_vulnerable = get_list_from_file("./dataset/train_vulnerable" + str(i))
    test_vulnerable = get_list_from_file("./dataset/test_vulnerable" + str(i))
    print "get dataset end!"

    selected_ngrams = best_features
    train_added_columns = []
    normal_js_dir_suffix = "./js/normal/"
    vulnerable_js_dir_suffix = "./js/vulnerable/"
    count = 0
    trainY = []
    for packagename in train_normal:
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(0)
    for packagename in train_vulnerable:
        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(1)
    train_added_columns = np.array(train_added_columns)
    train_matrix = np.c_[selected_data, train_added_columns]
    train_matrix = np.c_[train_matrix, trainY]
    np.savetxt("./train-test-matrix/train_infogain.dat" + str(i), X=train_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate train matrix done!"

    test_matrix = []
    normal_ngrams_dir = "./ngrams/normal/"
    vulnerable_ngrams_dir = "./ngrams/vulnerable/"
    for packagename in test_normal:
        test_vector = []
        one_list = get_list_from_file(normal_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(0)
        test_matrix.append(test_vector)

    for packagename in test_vulnerable:
        test_vector = []
        one_list = get_list_from_file(vulnerable_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))

        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(1)
        test_matrix.append(test_vector)

    test_matrix = np.array(test_matrix)
    np.savetxt("./train-test-matrix/test_infogain.dat" + str(i), X=test_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate test matrix done!"

# endTimeStamp = time.time()
# total_time = endTimeStamp - startTimeStamp
# ft = open("log_time_generate_matrix_infogain_" + str(endTimeStamp), "w")
# ft.write("Total Time : " + str(total_time) + "\n")
# ft.close()


# contains feature selection
def generate_matrix_chi(i):
    # feature selection
    best_features, selected_data = test_feature_selection_chi(i)
    print "feature selection done!"
# import time
# startTimeStamp = time.time()

    train_normal = get_list_from_file("./dataset/train_normal" + str(i))
    test_normal = get_list_from_file("./dataset/test_normal" + str(i))
    train_vulnerable = get_list_from_file("./dataset/train_vulnerable" + str(i))
    test_vulnerable = get_list_from_file("./dataset/test_vulnerable" + str(i))
    print "get dataset end!"

    selected_ngrams = best_features
    train_added_columns = []
    normal_js_dir_suffix = "./js/normal/"
    vulnerable_js_dir_suffix = "./js/vulnerable/"
    count = 0
    trainY = []
    for packagename in train_normal:
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(0)
    for packagename in train_vulnerable:
        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(1)
    train_added_columns = np.array(train_added_columns)
    train_matrix = np.c_[selected_data, train_added_columns]
    train_matrix = np.c_[train_matrix, trainY]
    np.savetxt("./train-test-matrix/train_chi.dat" + str(i), X=train_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate train matrix done!"

    test_matrix = []
    normal_ngrams_dir = "./ngrams/normal/"
    vulnerable_ngrams_dir = "./ngrams/vulnerable/"
    for packagename in test_normal:
        test_vector = []
        one_list = get_list_from_file(normal_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(0)
        test_matrix.append(test_vector)

    for packagename in test_vulnerable:
        test_vector = []
        one_list = get_list_from_file(vulnerable_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))

        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(1)
        test_matrix.append(test_vector)

    test_matrix = np.array(test_matrix)
    np.savetxt("./train-test-matrix/test_chi.dat" + str(i), X=test_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate test matrix done!"

# endTimeStamp = time.time()
# total_time = endTimeStamp - startTimeStamp
# ft = open("log_time_generate_matrix_infogain_" + str(endTimeStamp), "w")
# ft.write("Total Time : " + str(total_time) + "\n")
# ft.close()


# contains feature selection
def generate_matrix_df(i):
    # feature selection
    best_features, selected_data = test_feature_selection_df(i)
    print "feature selection done!"
# import time
# startTimeStamp = time.time()

    train_normal = get_list_from_file("./dataset/train_normal" + str(i))
    test_normal = get_list_from_file("./dataset/test_normal" + str(i))
    train_vulnerable = get_list_from_file("./dataset/train_vulnerable" + str(i))
    test_vulnerable = get_list_from_file("./dataset/test_vulnerable" + str(i))
    print "get dataset end!"

    selected_ngrams = best_features
    train_added_columns = []
    normal_js_dir_suffix = "./js/normal/"
    vulnerable_js_dir_suffix = "./js/vulnerable/"
    count = 0
    trainY = []
    for packagename in train_normal:
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(0)
    for packagename in train_vulnerable:
        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        one_vector = []
        one_vector.append(tree.get_max_width())
        one_vector.append(tree.get_max_depth())
        train_added_columns.append(one_vector)
        trainY.append(1)
    train_added_columns = np.array(train_added_columns)
    train_matrix = np.c_[selected_data, train_added_columns]
    train_matrix = np.c_[train_matrix, trainY]
    np.savetxt("./train-test-matrix/train_df.dat" + str(i), X=train_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate train matrix done!"

    test_matrix = []
    normal_ngrams_dir = "./ngrams/normal/"
    vulnerable_ngrams_dir = "./ngrams/vulnerable/"
    for packagename in test_normal:
        test_vector = []
        one_list = get_list_from_file(normal_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))
        js_dir = normal_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(0)
        test_matrix.append(test_vector)

    for packagename in test_vulnerable:
        test_vector = []
        one_list = get_list_from_file(vulnerable_ngrams_dir + packagename)
        for gram in selected_ngrams:
            test_vector.append(one_list.count(gram))

        js_dir = vulnerable_js_dir_suffix + packagename + ".js"
        print count, js_dir
        count += 1
        json_str = os.popen('node analyze.js ' + js_dir).read()
        py_dict = json.loads(json_str)
        tree = MultiTree(py_dict)
        test_vector.append(tree.get_max_width())
        test_vector.append(tree.get_max_depth())
        test_vector.append(1)
        test_matrix.append(test_vector)

    test_matrix = np.array(test_matrix)
    np.savetxt("./train-test-matrix/test_df.dat" + str(i), X=test_matrix, fmt="%d", delimiter=',', newline='\n')
    print "generate test matrix done!"


def generate_matrix_infogain_ten():
    for i in range(10):
        generate_matrix_infogain(i)


def generate_matrix_chi_ten():
    for i in range(10):
        generate_matrix_chi(i)


def generate_matrix_df_ten():
    for i in range(10):
        generate_matrix_df(i)


if __name__ == '__main__':
    # generate_dataset_ten_fold()
    # generate_matrix_infogain_ten()
    # generate_matrix_chi_ten()
    generate_matrix_df_ten()


test_TIterator.py

Source: test_TIterator.py (GitHub)


...
    name: str
    ruby: TOption[str]
    address: TOption[Address]
class TestToCsv:
    def test_normal(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name", "ruby"])
            == """
1,一郎,
2,二郎,じろう
""".lstrip()
        )
        assert d.to_csv(["id", "name", "ruby"]) == ""
    def test_ignore_extra_params(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name"])
            == """
1,一郎
2,二郎
""".lstrip()
        )
        assert d.to_csv(["id", "name"]) == ""
    def test_with_header(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name", "ruby"], with_header=True)
            == """
id,name,ruby
1,一郎,
2,二郎,じろう
""".lstrip()
        )
        assert (
            d.to_csv(["id", "name", "ruby"], with_header=True)
            == """
id,name,ruby
""".lstrip()
        )
    def test_with_space(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": " 一 郎 "}, {"id": 2, "name": " 二 郎 ", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name", "ruby"])
            == """
1, 一 郎 ,
2, 二 郎 ,じろう
""".lstrip()
        )
        assert d.to_csv(["id", "name", "ruby"]) == ""
    def test_crlf(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name", "ruby"], crlf=True)
            == """
1,一郎,\r
2,二郎,じろう\r
""".lstrip()
        )
        assert d.to_csv(["id", "name", "ruby"], crlf=True) == ""
    def test_tsv(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert (
            d.to_csv(["id", "name", "ruby"], tsv=True)
            == """
1\t一郎\t
2\t二郎\tじろう
""".lstrip()
        )
        assert d.to_csv(["id", "name", "ruby"], tsv=True) == ""
    def test_including_dict(self):
        d = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "address": {"name": "住所"}}]
        )
        assert (
            d.to_csv(["id", "name", "address"])
            == """
1,一郎,
2,二郎,{'name': '住所'}
""".lstrip()
        )
        assert d.to_csv(["id", "name", "address"]) == ""
    def test_including_list(self):
        d = Spot.from_iterable_dicts(
            [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        )
        assert (
            d.to_csv(["names", "address"])
            == """
['spot1'],{'name': 'address1'}
"['spot21','spot22']",
""".lstrip()
        )
        assert d.to_csv(["names", "address"]) == ""
class TestToCsvf:
    """
    Requirements: `from_csvf_to_iterator` are fine
    """
    def test_normal(self, tmpdir):
        origin = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        it: TIterator[Human] = Human.from_iterable_dicts(origin)
        fpath = os.path.join(tmpdir.mkdir("tmp").strpath, "test.csv")
        assert (
            it.to_csvf(
                fpath, fieldnames=["name", "id", "ruby"], encoding="euc-jp", with_header=False
            )
            == fpath
        )
        assert (
            Human.from_csvf_to_iterator(
                fpath, fieldnames=["name", "id", "ruby"], encoding="euc-jp"
            ).to_dicts()
            == origin
        )
class TestNextAt:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).next_at(1).get().to_dict() == {
            "names": ["spot21", "spot22"]
        }
    def test_not_found(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).next_at(2).is_none()
class TestForEach:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        ret = []
        assert Spot.from_iterable_dicts(d).for_each(lambda s: ret.append(s.names[0])) is None
        assert ret == ["spot1", "spot21"]
class TestMap:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).map(lambda s: s.names).to_list() == [
            ["spot1"],
            ["spot21", "spot22"],
        ]
class TestEMap:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).emap(lambda s, i: (i + 1, s.names)).to_list() == [
            (1, ["spot1"]),
            (2, ["spot21", "spot22"]),
        ]
class TestFlatten:
    def test_normal(self):
        assert TIterator([[1, 2], [3, 4]]).flatten().to_list() == [1, 2, 3, 4]
class TestFlatMap:
    def test_normal(self):
        ds = Human.from_iterable_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        assert ds.flat_map(lambda x: [x.id, x.name]).to_list() == [1, "一郎", 2, "二郎"]
class TestFilter:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).filter(lambda s: s.address.get()).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}}
        ]
class TestReject:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).reject(lambda s: s.address.get()).to_dicts() == [
            {"names": ["spot21", "spot22"]}
        ]
class TestHead:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).head().get().to_dict() == {
            "names": ["spot1"],
            "address": {"name": "address1"},
        }
class TestTake:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).take(3).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
class TestTakeWhile:
    def test_normal(self):
        d = [
            {"names": ["spot11", "spot12"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert [
            {"names": ["spot11", "spot12"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
        ] == Spot.from_iterable_dicts(d).take_while(lambda x: x.names.size() > 1).to_dicts()
class TestTail:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).tail(3).to_dicts() == [
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestUniq:
    def test_normal(self):
        """ Don't forget `d[0] != d[1]`
        """
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).uniq().to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestUniqBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).uniq_by(lambda x: x.to_json()).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestPartition:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        rejected, fulfilled = Spot.from_iterable_dicts(d).partition(lambda s: s.address.get())
        assert fulfilled.to_dicts() == [{"names": ["spot1"], "address": {"name": "address1"}}]
        assert rejected.to_dicts() == [{"names": ["spot21", "spot22"]}]
class TestGroupBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).group_by(lambda s: str(len(s.names))).to_dict(
            ignore_none=True
        ) == {
            "1": [
                {"names": ["spot1"], "address": {"name": "address1"}},
                {"names": ["spot4"], "address": {"name": "address1"}},
            ],
            "2": [{"names": ["spot21", "spot22"]}, {"names": ["spot31", "spot32"]}],
        }
        assert Spot.from_iterable_dicts(d).group_by(lambda s: str(len(s.names)))["1"].map(
            lambda x: x.names
        ).to_dicts() == [["spot1"], ["spot4"]]
class TestKeyBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).key_by(lambda s: str(len(s.names))).to_dict(
            ignore_none=True
        ) == {
            "1": {"names": ["spot4"], "address": {"name": "address1"}},
            "2": {"names": ["spot31", "spot32"]},
        }
class TestOrderBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).order_by(lambda x: len(x.names)).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
        ]
    def test_reverse(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert Spot.from_iterable_dicts(d).order_by(
            lambda x: len(x.names), reverse=True
        ).to_dicts() == [
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
        ]
class TestConcat:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        e = [{"names": ["spot31", "spot32"]}]
        assert Spot.from_iterable_dicts(d).concat(Spot.from_iterable_dicts(e)).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
    def test_first(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        e = [{"names": ["spot31", "spot32"]}]
        assert Spot.from_iterable_dicts(d).concat(
            Spot.from_iterable_dicts(e), first=True
        ).to_dicts() == [
            {"names": ["spot31", "spot32"]},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
        ]
class TestReduce:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).reduce(lambda r, x: r + len(x.names), 100) == 103
class TestSum:
    def test_normal(self):
        assert TIterator([1, 2, 3]).sum() == 6
class TestSumBy:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_iterable_dicts(d).sum_by(lambda x: len(x.names)) == 3
class TestCountBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).count_by(lambda x: len(x.names)) == {1: 1, 2: 2}
class TestJoin:
    def test_normal(self):
        assert TIterator(["a", "bc", "def"]).join("---") == "a---bc---def"
    def test_including_not_str(self):
        with pytest.raises(TypeError):
            TIterator(["1", 2, "3"]).join("---")
class TestFind:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32", "spot33"]},
        ]
        assert Spot.from_iterable_dicts(d).find(lambda x: len(x.names) == 2).get().to_dict(
            ignore_none=True
        ) == {"names": ["spot21", "spot22"]}
    def test_not_found(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).find(lambda x: len(x.names) == 3).is_none()
class TestAll:
    def test_true(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).all(lambda x: x.names) is True
    def test_false(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).all(lambda x: len(x.names) > 1) is False
class TestAny:
    def test_true(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).any(lambda x: len(x.names) > 1) is True
    def test_false(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_iterable_dicts(d).any(lambda x: len(x.names) > 2) is False
class TestIntersection:
    def test_normal(self):
        assert TIterator([1, 2, 3, 4, 5]).intersection([2, 4, 6]).to_list() == [2, 4]
    def test_empty(self):
        assert TIterator([1, 2, 3, 4, 5]).intersection([7, 8]).to_list() == []
class TestNotIntersection:
    def test_normal(self):
        assert TIterator([1, 2, 3, 4, 5]).not_intersection([2, 4, 6]).to_list() == [1, 3, 5]
    def test_empty(self):
        assert TIterator([1, 2, 3, 4, 5]).not_intersection([1, 2, 3, 4, 5]).to_list() == []
class TestReverse:
    def test_normal(self):
        assert TIterator([1, 2, 3]).reverse().to_list() == [3, 2, 1]
    def test_empty(self):
...
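One assertion in TestToCsv.test_normal above is worth pausing on: to_csv is called twice, and the second call is expected to return an empty string. That is the one-shot behaviour of an iterator-backed collection: the first traversal consumes the underlying iterator (the list-backed test_TList.py below has no such assertions). A plain-generator analogue of the behaviour being pinned down, independent of the library under test:

# Sketch: a consumed iterator yields nothing on a second pass.
rows = (r for r in [{"id": 1}, {"id": 2}])
assert list(rows) == [{"id": 1}, {"id": 2}]  # first traversal drains the generator
assert list(rows) == []                      # second traversal sees nothing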


test_TList.py

Source: test_TList.py (GitHub)


...
    name: str
    ruby: TOption[str]
    address: TOption[Address]
class Test__Add__:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        e = [{"names": ["spot31", "spot32"]}]
        sd = Spot.from_dicts(d)
        se = Spot.from_dicts(e)
        actual = sd + se
        assert sd.to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
        ]
        assert se.to_dicts() == [{"names": ["spot31", "spot32"]}]
        assert isinstance(actual, TList)
        assert actual.to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
    def test_exchange_rule(self):
        assert [1, 2] == TList([1]) + TList([2])
        assert [1, 2] == [1] + TList([2])
        assert [1, 2] == TList([1]) + [2]
class TestToCsv:
    def test_normal(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "ruby"])
            == """
1,一郎,
2,二郎,じろう
""".lstrip()
        )
    def test_ignore_extra_params(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name"])
            == """
1,一郎
2,二郎
""".lstrip()
        )
    def test_with_header(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "ruby"], with_header=True)
            == """
id,name,ruby
1,一郎,
2,二郎,じろう
""".lstrip()
        )
    def test_with_space(self):
        d = [{"id": 1, "name": " 一 郎 "}, {"id": 2, "name": " 二 郎 ", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "ruby"])
            == """
1, 一 郎 ,
2, 二 郎 ,じろう
""".lstrip()
        )
    def test_crlf(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "ruby"], crlf=True)
            == """
1,一郎,\r
2,二郎,じろう\r
""".lstrip()
        )
    def test_tsv(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "ruby"], tsv=True)
            == """
1\t一郎\t
2\t二郎\tじろう
""".lstrip()
        )
    def test_including_dict(self):
        d = [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "address": {"name": "住所"}}]
        assert (
            Human.from_dicts(d).to_csv(["id", "name", "address"])
            == """
1,一郎,
2,二郎,{'name': '住所'}
""".lstrip()
        )
    def test_including_list(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert (
            Spot.from_dicts(d).to_csv(["names", "address"])
            == """
['spot1'],{'name': 'address1'}
"['spot21','spot22']",
""".lstrip()
        )
class TestToCsvf:
    """
    Requirements: `from_csvf_to_list` are fine
    """
    def test_normal(self, tmpdir):
        r: TList[Human] = Human.from_dicts(
            [{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}]
        )
        fpath = os.path.join(tmpdir.mkdir("tmp").strpath, "test.csv")
        assert (
            r.to_csvf(
                fpath, fieldnames=["name", "id", "ruby"], encoding="euc-jp", with_header=False
            )
            == fpath
        )
        assert (
            Human.from_csvf_to_list(
                fpath, fieldnames=["name", "id", "ruby"], encoding="euc-jp"
            ).to_dicts()
            == r.to_dicts()
        )
class TestGet:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).get(1).get().to_dict() == {"names": ["spot21", "spot22"]}
    def test_not_found(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).get(2).is_none()
class TestForEach:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        ret = []
        assert Spot.from_dicts(d).for_each(lambda s: ret.append(s.names[0])) is None
        assert ret == ["spot1", "spot21"]
class TestMap:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).map(lambda s: s.names) == [["spot1"], ["spot21", "spot22"]]
class TestEMap:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).emap(lambda s, i: (i + 1, s.names)) == [
            (1, ["spot1"]),
            (2, ["spot21", "spot22"]),
        ]
class TestFlatten:
    def test_normal(self):
        assert TList([[1, 2], [3, 4]]).flatten() == [1, 2, 3, 4]
class TestFlatMap:
    def test_normal(self):
        ds = Human.from_dicts([{"id": 1, "name": "一郎"}, {"id": 2, "name": "二郎", "ruby": "じろう"}])
        assert ds.flat_map(lambda x: [x.id, x.name]) == [1, "一郎", 2, "二郎"]
class TestFilter:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).filter(lambda s: s.address.get()).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}}
        ]
class TestReject:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).reject(lambda s: s.address.get()).to_dicts() == [
            {"names": ["spot21", "spot22"]}
        ]
class TestHead:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).head().get().to_dict() == {
            "names": ["spot1"],
            "address": {"name": "address1"},
        }
class TestTake:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).take(3).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
class TestTakeWhile:
    def test_normal(self):
        d = [
            {"names": ["spot11", "spot12"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert [
            {"names": ["spot11", "spot12"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
        ] == Spot.from_dicts(d).take_while(lambda x: x.names.size() > 1).to_dicts()
class TestTail:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).tail(3).to_dicts() == [
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestUniq:
    def test_normal(self):
        """ Don't forget `d[0] != d[1]`
        """
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).uniq().to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestUniqBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).uniq_by(lambda x: x.to_json()).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
class TestPartition:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        rejected, fulfilled = Spot.from_dicts(d).partition(lambda s: s.address.get())
        assert fulfilled.to_dicts() == [{"names": ["spot1"], "address": {"name": "address1"}}]
        assert rejected.to_dicts() == [{"names": ["spot21", "spot22"]}]
class TestGroupBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).group_by(lambda s: str(len(s.names))).to_dict(
            ignore_none=True
        ) == {
            "1": [
                {"names": ["spot1"], "address": {"name": "address1"}},
                {"names": ["spot4"], "address": {"name": "address1"}},
            ],
            "2": [{"names": ["spot21", "spot22"]}, {"names": ["spot31", "spot32"]}],
        }
class TestKeyBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
            {"names": ["spot4"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).key_by(lambda s: str(len(s.names))).to_dict(ignore_none=True) == {
            "1": {"names": ["spot4"], "address": {"name": "address1"}},
            "2": {"names": ["spot31", "spot32"]},
        }
class TestOrderBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).order_by(lambda x: len(x.names)).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
        ]
    def test_reverse(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
        ]
        assert Spot.from_dicts(d).order_by(lambda x: len(x.names), reverse=True).to_dicts() == [
            {"names": ["spot31", "spot32", "spot33", "spot34"]},
            {"names": ["spot21", "spot22", "spot23"]},
            {"names": ["spot41", "spot42"], "address": {"name": "address1"}},
            {"names": ["spot1"], "address": {"name": "address1"}},
        ]
class TestConcat:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        e = [{"names": ["spot31", "spot32"]}]
        assert Spot.from_dicts(d).concat(Spot.from_dicts(e)).to_dicts() == [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
    def test_first(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        e = [{"names": ["spot31", "spot32"]}]
        assert Spot.from_dicts(d).concat(Spot.from_dicts(e), first=True).to_dicts() == [
            {"names": ["spot31", "spot32"]},
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
        ]
class TestReduce:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).reduce(lambda r, x: r + len(x.names), 100) == 103
class TestSum:
    def test_normal(self):
        assert TList([1, 2, 3]).sum() == 6
class TestSumBy:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).sum_by(lambda x: len(x.names)) == 3
class TestCountBy:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).count_by(lambda x: len(x.names)) == {1: 1, 2: 2}
class TestSize:
    def test_normal(self):
        d = [{"names": ["spot1"], "address": {"name": "address1"}}, {"names": ["spot21", "spot22"]}]
        assert Spot.from_dicts(d).size() == 2
class TestJoin:
    def test_normal(self):
        assert TList(["a", "bc", "def"]).join("---") == "a---bc---def"
    def test_including_not_str(self):
        with pytest.raises(TypeError):
            TList(["1", 2, "3"]).join("---")
class TestFind:
    def test_normal(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32", "spot33"]},
        ]
        assert Spot.from_dicts(d).find(lambda x: len(x.names) == 2).get().to_dict(
            ignore_none=True
        ) == {"names": ["spot21", "spot22"]}
    def test_not_found(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).find(lambda x: len(x.names) == 3).is_none()
class TestAll:
    def test_true(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).all(lambda x: x.names) is True
    def test_false(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).all(lambda x: len(x.names) > 1) is False
class TestAny:
    def test_true(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).any(lambda x: len(x.names) > 1) is True
    def test_false(self):
        d = [
            {"names": ["spot1"], "address": {"name": "address1"}},
            {"names": ["spot21", "spot22"]},
            {"names": ["spot31", "spot32"]},
        ]
        assert Spot.from_dicts(d).any(lambda x: len(x.names) > 2) is False
class TestIntersection:
    def test_normal(self):
        assert TList([1, 2, 3, 4, 5]).intersection([2, 4, 6]) == [2, 4]
    def test_empty(self):
        assert TList([1, 2, 3, 4, 5]).intersection([7, 8]) == []
class TestNotIntersection:
    def test_normal(self):
        assert TList([1, 2, 3, 4, 5]).not_intersection([2, 4, 6]) == [1, 3, 5]
    def test_empty(self):
        assert TList([1, 2, 3, 4, 5]).not_intersection([1, 2, 3, 4, 5]) == []
class TestReverse:
    def test_normal(self):
        assert TList([1, 2, 3]).reverse() == [3, 2, 1]
    def test_empty(self):
...
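TestUniq and TestUniqBy above pin down a subtle distinction: uniq() keeps all three entries even though the first two dicts are equal, because each dict is deserialized into a distinct model instance (the docstring's reminder that `d[0] != d[1]`), while uniq_by(lambda x: x.to_json()) deduplicates on a serialized key. A plain-Python sketch of the same distinction, with invented names:

# Toy model without __eq__/__hash__: equality falls back to identity.
class ToySpot(object):
    def __init__(self, names):
        self.names = names

spots = [ToySpot(["spot1"]), ToySpot(["spot1"]), ToySpot(["spot4"])]
assert len(set(spots)) == 3          # identity-based: "equal" objects don't collapse

seen, deduped = set(), []
for s in spots:
    key = tuple(s.names)             # stand-in for a to_json()-style key
    if key not in seen:
        seen.add(key)
        deduped.append(s)
assert [s.names for s in deduped] == [["spot1"], ["spot4"]]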


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run green automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 free minutes of automation testing.
