How to use bad_init in pytest-cov test suites

Best Python code snippets using pytest-cov. Note that bad_init is not part of the pytest-cov API; it is a variable name that appears in open-source Python projects whose tests are run under pytest-cov. The three snippets below show how real projects use it.
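To reproduce a coverage run over files like the ones below, the usual entry point is pytest with the --cov option that pytest-cov adds. Here is a minimal sketch; "mypackage" and "tests/" are placeholder names standing in for your own package and test directory:

# run_coverage.py -- equivalent to: pytest --cov=mypackage --cov-report=term-missing tests/
import pytest

exit_code = pytest.main([
    "--cov=mypackage",            # measure coverage for this importable package
    "--cov-report=term-missing",  # list uncovered line numbers in the terminal
    "tests/",                     # directory containing the test files
])
raise SystemExit(exit_code)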

test_chinese.py

Source: test_chinese.py (GitHub)


import pickle

from hanziconv import HanziConv

from revscoring.datasources import revision_oriented
from revscoring.languages import chinese

from .util import compare_extraction

bad_init = [
    "王八蛋",  # son of a bitch
    "他媽的",  # "his mother's"
    "去你媽",  # "to your mother"
    "去你的",  # "to yours"
    "婊子", "妓女",  # prostitute
    "日狗", "日了狗",  # lonely dog
    "屁眼", "混蛋", "渾蛋",  # asshole
    "混帳",  # variant of the above
    "王八",  # bitch
    "白癡",  # idiot
    "腦殘",  # brain dead
    "智障",  # mentally retarded
    "婊", "妓",  # prostitute
    "屎",  # shit
    "屌",  # dick
    "妈逼",  # (this is verbal but definitely bad)
    "艹", "肏",  # fuck (in any context)
    "放屁",  # fart
    # Variants (homonyms) of the use of "fuck" that use 操 ("operation") and
    # 草 ("grass"); "肏" is the actual character. "艹" is not a real character,
    # but it is used this way.
    "操你", "草你", "日你",  # fuck you
    "操他", "草他", "日他",  # fuck his
    "操她", "草她", "日她",  # fuck her
    # Discrimination (racial slurs)
    "小日本",  # little Japanese
    "台湾狗",  # Taiwanese dogs
    "共产中国",  # communist China
    "流氓国家",  # rogue country
    "人渣",  # human slag
    "我去",  # this is verbal and bad
    "鬼子"  # devil, usually a suffix
]

BAD = [HanziConv.toSimplified(word) for word in bad_init] + \
      [HanziConv.toTraditional(word) for word in bad_init]

INFORMAL = [
    # Hello
    "你好",  # nǐ hǎo; the standard "hello" greeting
    "您好",  # nín hǎo; the polite form of the same greeting
    "你怎么样",  # nǐ zěnmeyàng?; "What's up?", "How are you doing?"
    # Good afternoon
    "午安",  # wǔ'ān; note: seldom used in the Mainland
    "下午好",  # xiàwǔ hǎo; seldom used in the Republic of China
    # Good evening / good night
    "晚安",  # wǎn'ān; literally "peace at night", good night
    "晚上好",  # wǎnshang hǎo; good evening!
    # Good-bye
    "再見",  # zàijiàn; literally "see you again"
    "明天見",  # míngtiān jiàn; literally "see you tomorrow"
    "拜拜",  # bāibāi/báibái; from English "bye-bye"
    "回頭見",  # huítóujiàn; roughly equivalent to "see you soon"
    "回見",  # huíjiàn; usually used in Beijing or in written Chinese
    "再會",  # zàihuì; literally "[we'll] meet again"
    "66666666", "666",
    "233", "2333333"
]

WORDS_TO_WATCH = [
    # Advertising language
    "本台",  # this channel
    "本公司",  # this company
    "代刷", "代练", "代抢",  # someone who plays games for you
    "强势回归",  # "mighty" return
    "超值",  # very cost-effective
    "一条龙",  # a proverb? "one line of dragon"
    "一夜情",  # selling one's body (advertising)
    "世界一流", "国际一流",  # world first-class
    "用户第一", "用户满意", "用户至上",  # customer-first
    "核心价值", "核心团队", "核心宗旨",  # core value
    "服务小姐",  # service lady
    "服务范围",  # service area
    "服务项目",  # service items
    "服务理念",  # service philosophy
]

# A clean Chinese Wikipedia excerpt (about the 2005 Atlantic hurricane season)
# used as negative test data: none of the word lists should match it.
OTHER = [
    """2005年大西洋颶風季是有纪录以来最活跃的大西洋颶風季,至今仍保持着多项纪录。
    全季对大范围地区造成毁灭性打击,共导致3,913人死亡,损失数额更创下新纪录,高达1592亿美元。
    本季单大型飓风就有7场之多,其中5场在登陆时仍有大型飓风强度,分别是颶風丹尼斯、艾米莉、
    卡特里娜、丽塔和威尔玛,大部分人员伤亡和财产损失都是这5场飓风引起。
    墨西哥的金塔納羅奧州和尤卡坦州,
    以及美国的佛罗里达州和路易斯安那州都曾两度受大型飓风袭击;古巴、巴哈马、海地,
    美国的密西西比州和得克萨斯州,还有墨西哥的塔毛利帕斯州都曾直接受1场大型飓风冲击,
    还有至少1场在附近掠过。美國墨西哥灣沿岸地區是本季受灾最严重的所在,
    飓风卡特里娜产生高达10米的风暴潮,引发毁灭性洪灾,密西西比州沿海地区的大部分建筑物被毁,
    风暴之后又令新奥尔良防洪堤决口,整个城市因此受到重创。此外,飓风斯坦同溫帶氣旋共同影响,
    在中美洲多地引发致命的泥石流,其中又以危地马拉灾情最为严重。"""
]

r_text = revision_oriented.revision.text

def simplified_eq(a, b):
    # Compare two match tuples after normalizing both to Simplified characters.
    return len(a) == len(b) and \
        HanziConv.toSimplified(a[0]) == \
        HanziConv.toSimplified(b[0])

def test_badwords():
    compare_extraction(chinese.badwords.revision.datasources.matches,
                       BAD, OTHER, eq=simplified_eq)
    assert chinese.badwords == pickle.loads(pickle.dumps(chinese.badwords))

def test_informals():
    compare_extraction(chinese.informals.revision.datasources.matches,
                       INFORMAL, OTHER, eq=simplified_eq)
    assert chinese.informals == pickle.loads(pickle.dumps(chinese.informals))

def test_words_to_watch():
    compare_extraction(chinese.words_to_watch.revision.datasources.matches,
                       WORDS_TO_WATCH, OTHER, eq=simplified_eq)
    assert chinese.words_to_watch == \
        pickle.loads(pickle.dumps(chinese.words_to_watch))


learning.py

Source: learning.py (GitHub)


import os
import pickle

import numpy as np
import tensorflow as tf

from model_class import *    # provides NeuralNetwork, LorenzCurve, ...
from data_creation import *  # provides RealData_f, RealData_h, ...

######################## LEARNING PROCESS ########################

all_fpr = []
all_tpr = []
all_auc = []
lorenz = []
all_gini = []

# loop to train models
for intensitytype in ('f',):  # use ('f', 'h') to train both model families

    if intensitytype == 'f':
        hidden_dim = [5, 3]
    else:
        hidden_dim = [30, 15]

    duan_replic = ""  # "": full Duan replication || "exp": exp activation function

    # plot directories
    directory_roc = ('Results NN for Intensity estimation\\real dataset\\roc\\'
                     + intensitytype + '\\' + str(hidden_dim) + '\\')
    if not os.path.exists(directory_roc):
        os.makedirs(directory_roc)

    directory_lorenz = ('Results NN for Intensity estimation\\real dataset\\lorenz\\'
                        + intensitytype + '\\' + str(hidden_dim) + '\\')
    if not os.path.exists(directory_lorenz):
        os.makedirs(directory_lorenz)

    for tau in range(36):
        print('Estimating model ... ' + intensitytype + str(tau))
        deltaT = 1 / 12
        learning_rate = 0.001
        feature_size = 12
        batch_size = 256
        perc = 0.9
        name = (intensitytype + str(tau) + '_hidden' + str(hidden_dim[0])
                + '_layers' + str(len(hidden_dim)) + '_learning' + str(learning_rate)
                + '_batch' + str(batch_size) + '_perc' + str(perc) + '_')
        path = (intensitytype + str(tau) + '_hidden' + str(hidden_dim[0])
                + '_layers' + str(len(hidden_dim)) + '_learning' + str(learning_rate)
                + '_batch' + str(batch_size) + '_perc' + str(perc) + '\\')

        if intensitytype == 'f':
            x_train, y_train, x_test, y_test, _, _ = RealData_f(tau=tau)
        else:
            x_train, y_train, x_test, y_test, _, _ = RealData_h(tau=tau)

        model = NeuralNetwork(hidden_dim=hidden_dim, deltaT=deltaT,
                              learning_rate=learning_rate, feature_size=feature_size,
                              batch_size=batch_size, perc=perc, path=path, name=name)

        # Trick to avoid a bad initialization: if the output of the first forward
        # pass is numerically stuck at the fmin boundary, initialize again.
        model.initialise()
        _, ff = model.pred(x_train, y_train)
        gg = 0.00001 * np.ones(ff.shape)
        bad_init = np.allclose(ff, gg)
        while bad_init:
            model.initialise()
            _, ff = model.pred(x_train, y_train)
            gg = 0.00001 * np.ones(ff.shape)
            bad_init = np.allclose(ff, gg)

        # training process
        for e in range(20):
            model.training(x_train, y_train, e)
            in_loss_value, in_f_value = model.pred(x_train, y_train)
            out_loss_value, out_f_value = model.pred_and_write_summary(x_test, y_test, e)
            print('insample-loss:', in_loss_value, '// outsample-loss:', out_loss_value)

        # computes the AUC score and saves the ROC curve to directory_roc
        # _, _, auc_score, fpr_val, tpr_val = testmodel(model=model, x=x_test,
        #                                               y=y_test, path=directory_roc,
        #                                               save=True)
        perc, cumy, gini = LorenzCurve(model, x_test, y_test, tau,
                                       str(model.hidden_dim), color='green',
                                       path=directory_lorenz, save=True)

        # all_auc.append(auc_score)  # store the AUC score for this model
        # all_fpr.append(fpr_val)
        # all_tpr.append(tpr_val)
        lorenz.append((perc, cumy))
        all_gini.append(gini)

        tf.reset_default_graph()
        model.sess.close()

    with open(directory_lorenz + "gini.txt", "w") as f:
        for s in all_gini:
            f.write(str(s) + "\n")

    with open(directory_lorenz + 'lorenz.pkl', 'wb') as f:
        pickle.dump(lorenz, f)

    all_auc = []
    lorenz = []
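The bad_init guard above exists because this network's output apparently has a lower bound at fmin = 1e-5; an unlucky random initialization can leave every prediction pinned to that boundary, and training never recovers. The script therefore re-initializes until the first forward pass moves off the boundary. Below is a self-contained sketch of the same guard, where init_fn and predict_fn are hypothetical stand-ins for model.initialise and model.pred, plus a retry cap that the original unbounded while loop lacks:

import numpy as np

def init_until_valid(init_fn, predict_fn, x, y, fmin=1e-5, max_tries=50):
    """Re-run init_fn until predictions move off the fmin boundary."""
    for _ in range(max_tries):
        init_fn()
        preds = predict_fn(x, y)
        if not np.allclose(preds, fmin * np.ones_like(preds)):
            return preds  # good draw: outputs are not stuck at the boundary
    raise RuntimeError("no non-degenerate initialization found")

Capping the retries turns a silent infinite loop into a loud failure when the architecture itself, rather than the random draw, is the problem.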


lqrflm_load_play.py

Source: lqrflm_load_play.py (GitHub)


import numpy as np
import math
import gym

env_name = 'Pendulum-v0'
env = gym.make(env_name)

# load the time-varying LQR gains saved during training
gains = np.loadtxt('./save_weights/kalman_gain.txt', delimiter=" ")
T = int(gains[-1, 0])
Kt = gains[:, 1:4]
kt = gains[:, -1]

# target initial state: pendulum hanging straight down
i_ang = 180.0 * np.pi / 180.0
x0 = np.array([math.cos(i_ang), math.sin(i_ang), 0])

# keep resetting until the environment starts close enough to x0
bad_init = True
while bad_init:
    state = env.reset()  # observation from gym has shape (3,)
    x0err = state - x0
    if np.sqrt(x0err.T.dot(x0err)) < 0.1:  # x0 has shape (state_dim,)
        bad_init = False

# replay the learned time-varying linear policy
for time in range(T + 1):
    env.render()
    Ktt = np.reshape(Kt[time, :], [1, 3])
    action = Ktt.dot(state) + kt[time]
    action = np.clip(action, -env.action_space.high[0], env.action_space.high[0])
    ang = math.atan2(state[1], state[0])
    print('Time:', time, ', angle:', ang * 180.0 / np.pi, 'action:', action)
    state, reward, _, _ = env.step(action)
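Here bad_init drives a rejection-sampling loop: Pendulum-v0 resets to a random state, so the script keeps calling env.reset() until the observation lands within 0.1 of the desired starting state x0, and only then replays the LQR gains. A minimal sketch of the same pattern factored into a helper, using the same old-style gym reset API as the snippet; reset_near and max_resets are names introduced here for illustration:

import numpy as np
import gym

def reset_near(env, x0, tol=0.1, max_resets=1000):
    """Keep resetting until the initial observation is within tol of x0."""
    for _ in range(max_resets):
        state = env.reset()  # old gym API: reset() returns the observation only
        if np.linalg.norm(state - x0) < tol:
            return state
    raise RuntimeError("no reset landed within tolerance of x0")

env = gym.make('Pendulum-v0')
x0 = np.array([-1.0, 0.0, 0.0])  # cos(180°), sin(180°), zero angular velocity
state = reset_near(env, x0)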


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytest-cov automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing for FREE!

