How to use the test_long method in green

Best Python code snippets using green
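
A note before the snippets: they use test_long in two senses, as a unittest-style test method name (which the green runner can target directly) and as an ordinary variable. For the first sense, here is a minimal sketch of pointing green at a specific method. The module test_numbers.py and its contents are hypothetical; the dotted-target syntax is, as far as I know, green's documented way of selecting individual tests.

# test_numbers.py -- hypothetical module, used only to illustrate
# targeting a method named test_long with the green test runner
import unittest

class NumberTests(unittest.TestCase):
    def test_long(self):
        # an arbitrary large-integer check standing in for a real test body
        self.assertEqual(2 ** 64 + 1, 18446744073709551617)

if __name__ == '__main__':
    unittest.main()

# Shell usage (green installed via `pip install green`):
#   green test_numbers.NumberTests.test_long   # run just this method
#   green -vv test_numbers                     # run the whole module, verbose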

datasets.py

Source: datasets.py (GitHub)


import pandas as pd
import numpy as np
import torch
from torch.utils import data  # for building iterable data loaders
from ast import literal_eval  # safer than eval() for parsing the feature files


def label2num(label):
    # map sentiment strings to class indices
    if label == 'neutral':
        return 0
    if label == 'positive':
        return 1
    if label == 'negative':
        return 2


def MVSA_single_text_data_load(model_name):
    # read the label data
    label_path = '../Preprocessed data/singleLabel_new.txt'
    ID = []
    text_label = []
    image_label = []
    all_label = []
    f = open(label_path, 'r')
    for line in f:
        ID.append(int(line.split()[0]))
        text_label.append(label2num(line.split()[1].split(',')[0]))
        image_label.append(label2num(line.split()[1].split(',')[1]))
        all_label.append(label2num(line.split()[1].split(',')[2]))
    f.close()
    text_feature = []

    # text_feature_path = '../Preprocessed data/raw_txt_XLNet/single/'  ## text vectors
    text_feature_path = '../Preprocessed data/text-bert-single/'  ## text vectors

    for i in range(1, 5200):
        if i not in ID:
            continue
        filename = text_feature_path + str(i) + '.txt'
        try:
            with open(filename, 'r') as f:
                line = f.readline().strip()
                line = literal_eval(line)[0]
                if line == 0:
                    line = [0 for _ in range(1024)]
                text_feature.append(list(line))
        except Exception:  # skip missing or malformed feature files
            continue
    print("Text data loaded.")

    dataset_len = len(text_feature)

    # 9:1 train/test split on the overall (text+image) sentiment label
    train_text_data = torch.from_numpy(np.array(text_feature[:int(dataset_len * 9 / 10)]))
    train_label_data = torch.from_numpy(np.array(all_label[:int(dataset_len * 9 / 10)]))
    test_text_data = torch.from_numpy(np.array(text_feature[int(dataset_len * 9 / 10):]))
    test_label_data = torch.from_numpy(np.array(all_label[int(dataset_len * 9 / 10):]))

    train_long = len(train_text_data)
    test_long = len(test_text_data)
    print("train_long: ", train_long)
    print("test_long: ", test_long)

    # view() reshapes without reallocating; the in-place resize_() is deprecated for this
    if model_name == 'CNN':
        train_text_data = train_text_data.view(train_long, 1, 32, 32)
        test_text_data = test_text_data.view(test_long, 1, 32, 32)
    if model_name == 'RNN':
        train_text_data = train_text_data.view(train_long, 1, 1024)
        test_text_data = test_text_data.view(test_long, 1, 1024)
    if model_name == 'AttnRNN':
        train_text_data = train_text_data.view(train_long, 1, 1024)
        test_text_data = test_text_data.view(test_long, 1, 1024)

    """
    train_text_data = torch.from_numpy(np.array(text_feature[:int(dataset_len * 4 / 5)]))
    train_label_data = torch.from_numpy(np.array(text_label[:int(dataset_len * 4 / 5)]))
    test_text_data = torch.from_numpy(np.array(text_feature[int(dataset_len * 4 / 5):]))
    test_label_data = torch.from_numpy(np.array(text_label[int(dataset_len * 4 / 5):]))

    train_long = len(train_text_data)
    test_long = len(test_text_data)
    print("train_long: ", train_long)
    print("test_long: ", test_long)
    if model_name == 'CNN':
        train_text_data = train_text_data.resize_(train_long, 1, 32, 32)
        test_text_data = test_text_data.resize_(test_long, 1, 32, 32)
    if model_name == 'RNN':
        train_text_data = train_text_data.resize_(train_long, 1, 1024)
        test_text_data = test_text_data.resize_(test_long, 1, 1024)
    """
    train_data = zip(train_text_data, train_label_data)
    test_data = zip(test_text_data, test_label_data)

    train_loader = data.DataLoader(list(train_data), batch_size=1, shuffle=False)
    test_loader = data.DataLoader(list(test_data), batch_size=1, shuffle=False)

    return train_loader, test_loader


def MVSA_single_img_data_load(model_name):
    # read the label data
    label_path = '../Preprocessed data/singleLabel_new.txt'
    ID = []
    text_label = []
    image_label = []
    all_label = []
    f = open(label_path, 'r')
    for line in f:
        ID.append(int(line.split()[0]))
        text_label.append(label2num(line.split()[1].split(',')[0]))
        image_label.append(label2num(line.split()[1].split(',')[1]))
        all_label.append(label2num(line.split()[1].split(',')[2]))
    f.close()

    img_feature = []

    img_feature_path = '../Preprocessed data/raw_img_VGGNet16_Normalized/single/'  ## image vectors

    for i in range(1, 5200):
        if i not in ID:
            continue
        filename = img_feature_path + str(i) + '.txt'
        try:
            with open(filename, 'r') as f:
                line = f.readline().strip()
                line = literal_eval(line)
                img_feature.append(line)
        except Exception:  # skip missing or malformed feature files
            continue
    print("Image data loaded.")

    dataset_len = len(img_feature)

    train_text_data = torch.from_numpy(np.array(img_feature[:int(dataset_len * 9 / 10)]))
    # train_label_data = torch.from_numpy(np.array(image_label[:int(dataset_len * 9 / 10)]))
    train_label_data = torch.from_numpy(np.array(all_label[:int(dataset_len * 9 / 10)]))
    test_text_data = torch.from_numpy(np.array(img_feature[int(dataset_len * 9 / 10):]))
    # test_label_data = torch.from_numpy(np.array(image_label[int(dataset_len * 9 / 10):]))
    test_label_data = torch.from_numpy(np.array(all_label[int(dataset_len * 9 / 10):]))

    train_long = len(train_text_data)
    test_long = len(test_text_data)
    print("train_long: ", train_long)
    print("test_long: ", test_long)

    if model_name == 'CNN':
        train_text_data = train_text_data.view(train_long, 1, 32, 32)
        test_text_data = test_text_data.view(test_long, 1, 32, 32)
    if model_name == 'RNN':
        train_text_data = train_text_data.view(train_long, 1, 1024)
        test_text_data = test_text_data.view(test_long, 1, 1024)
    if model_name == 'AttnRNN':
        train_text_data = train_text_data.view(train_long, 1, 1024)
        test_text_data = test_text_data.view(test_long, 1, 1024)

    """
    train_text_data = torch.from_numpy(np.array(text_feature[:int(dataset_len * 4 / 5)]))
    train_label_data = torch.from_numpy(np.array(text_label[:int(dataset_len * 4 / 5)]))
    test_text_data = torch.from_numpy(np.array(text_feature[int(dataset_len * 4 / 5):]))
    test_label_data = torch.from_numpy(np.array(text_label[int(dataset_len * 4 / 5):]))

    train_long = len(train_text_data)
    test_long = len(test_text_data)
    print("train_long: ", train_long)
    print("test_long: ", test_long)
    if model_name == 'CNN':
        train_text_data = train_text_data.resize_(train_long, 1, 32, 32)
        test_text_data = test_text_data.resize_(test_long, 1, 32, 32)
    if model_name == 'RNN':
        train_text_data = train_text_data.resize_(train_long, 1, 1024)
        test_text_data = test_text_data.resize_(test_long, 1, 1024)
    """
    train_data = zip(train_text_data, train_label_data)
    test_data = zip(test_text_data, test_label_data)

    train_loader = data.DataLoader(list(train_data), batch_size=1, shuffle=False)
    test_loader = data.DataLoader(list(test_data), batch_size=1, shuffle=False)

    print(test_loader)
    print(test_text_data)

    return train_loader, test_loader


def MVSA_single_all_label_load():
    # read the label data
    label_path = '../Preprocessed data/singleLabel_new.txt'
    ID = []
    text_label = []
    image_label = []
    all_label = []
    f = open(label_path, 'r')
    for line in f:
        ID.append(int(line.split()[0]))
        text_label.append(label2num(line.split()[1].split(',')[0]))
        image_label.append(label2num(line.split()[1].split(',')[1]))
        all_label.append(label2num(line.split()[1].split(',')[2]))
    f.close()

    print("Label data loaded.")

    return ID, text_label, image_label, all_label


def MVSA_multiple_all_label_load():
    def labelchange(label):
        # swap the 0/1 encoding used in multipleLabel.txt
        if label == 0:
            return 1
        elif label == 1:
            return 0
        else:
            return 2

    # read the label data
    label_path = '../Preprocessed data/multipleLabel.txt'
    ID = []
    all_label = []
    f = open(label_path, 'r')
    for line in f:
        ID.append(int(line.split(',')[0]))
        all_label.append(labelchange(int(line.split(',')[1])))
    f.close()

    print("Label data loaded.")

    return ID, all_label


def MVSA_single_correlation_load():
    def Z_ScoreNormalization(x):
        x = np.asarray(x, dtype=float)  # accept plain lists
        x = (x - np.average(x)) / np.std(x)
        x = (x - np.min(x)) / (np.max(x) - np.min(x)) + 0.5
        return x

    # cor_path = '../Preprocessed data/single_correlation.txt'
    cor_path = '../Preprocessed data/single_bert_VGG_cos.txt'
    cor = []
    f = open(cor_path, 'r')
    for line in f:
        cor.append(float(line))
    cor_normal = Z_ScoreNormalization(cor)

    print("Correlation data loaded")
    return cor_normal


def MVSA_multiple_correlation_load():
    def Z_ScoreNormalization(x):
        x = np.asarray(x, dtype=float)  # accept plain lists
        x = (x - np.average(x)) / np.std(x)
        x = (x - np.min(x)) / (np.max(x) - np.min(x)) + 0.5
        return x

    # cor_path = '../Preprocessed data/multiple_correlation.txt'
    cor_path = '../Preprocessed data/multiple_bert_VGG_cos.txt'

    cor = []
    f = open(cor_path, 'r')
    for line in f:
        cor.append(float(line))
    cor_normal = Z_ScoreNormalization(cor)

    print("Correlation data loaded")
...
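
Two details of this snippet are easy to miss: train_long and test_long are simply the sizes of the 9:1 split, and the zip()-then-list trick hands DataLoader a plain list of (feature, label) pairs. The stock idiom for that is torch.utils.data.TensorDataset. Below is a minimal sketch, not code from the repository: the data is random stand-in data, with the 1024-dim features, split ratio and batch size borrowed from the snippet.

# Stand-in sketch: the snippet's 9:1 split and batch-size-1 loaders,
# expressed with TensorDataset instead of zip(); the data is random.
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.rand(100, 1024)        # 100 samples of 1024-dim "text vectors"
labels = torch.randint(0, 3, (100,))    # three sentiment classes, as in label2num

split = int(len(features) * 9 / 10)
train_loader = DataLoader(TensorDataset(features[:split], labels[:split]),
                          batch_size=1, shuffle=False)
test_loader = DataLoader(TensorDataset(features[split:], labels[split:]),
                         batch_size=1, shuffle=False)

train_long, test_long = split, len(features) - split  # the sizes the snippet prints
print("train_long:", train_long, "test_long:", test_long)  # 90 and 10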


Germany_PVdistribution-checkpoint.py

Source: Germany_PVdistribution-checkpoint.py (GitHub)


# -*- coding: utf-8 -*-
## @namespace Germany_PVdistribution
# Created on Wed Feb 28 09:47:22 2018
# Author
# Alejandro Pena-Bello
# alejandro.penabello@unige.ch
# Script developed for a project together with the Consumer Decision and Sustainable Behavior Lab
# to include user preferences in the charging and discharging of the battery.
# The script has been tested on Linux and Windows.
# This script includes five functions.
# TODO
# ----
# User Interface
# Requirements
# ------------
# Pandas, numpy, itertools, sys, glob, multiprocessing, time
from datetime import datetime  # pd.datetime was removed in pandas 1.0
import pandas as pd
import matplotlib.pyplot as plt
import Model as M
import numpy as np
import os
import sys

# ## Munich GHI and Temperature 2015
# source: soda-pro.com
def PV_generation(path):
    '''
    Description
    -----------
    This function reads the input data from Input_data_PV3.csv, fills missing
    data if needed and filters for 2015.
    Parameters
    ----------
    path : string ; path where all the input data is stored.
    Returns
    ------
    df: DataFrame; includes temperature and GHI with timestamp
    TODO
    ------
    Make it more general
    '''
    print('##############################')
    print('PV_Gen')
    df = pd.read_csv(path+'Input/Input_data_PV3.csv',
                     encoding='utf8', sep=';', engine='python', index_col=12,
                     parse_dates=[12], infer_datetime_format=True)
    df.index = df.index.tz_localize('UTC').tz_convert('CET')
    df = df[df.index.year == 2015]
    df.GHI = df.GHI/.25
    # ## Filling missing data
    if df.Temperature.isnull().sum():
        df['Temperature'] = df['Temperature']-273.15  # to °C
        df['Date'] = df.index.date
        df['TimeOnly'] = df.index.time
        # pivot into a date x time-of-day table, forward-fill along dates,
        # then stack back into a long (chronological) series
        test_pivot = df.pivot_table(values='Temperature', columns='TimeOnly', index='Date')
        test_filled = test_pivot.ffill()
        test_long = test_filled.stack()
        test_long.name = 'aux'
        test_long = test_long.reset_index()
        test_long['Time'] = test_long.apply(lambda r: datetime.combine(r['Date'], r['TimeOnly']), axis='columns')
        test_long = test_long[['Time', 'aux']]
        test_long = test_long.sort_values(['Time'])
        # test_long = test_long.set_index('Time')
        test_long.index = df.index
        df['Temperature'] = test_long['aux']
    if df.GHI.isnull().sum():
        df['Date'] = df.index.date
        df['TimeOnly'] = df.index.time
        test_pivot = df.pivot_table(values='GHI', columns='TimeOnly', index='Date')
        test_filled = test_pivot.ffill()
        test_long = test_filled.stack()
        test_long.name = 'aux'
        test_long = test_long.reset_index()
        test_long['Time'] = test_long.apply(lambda r: datetime.combine(r['Date'], r['TimeOnly']), axis='columns')
        test_long = test_long[['Time', 'aux']]
        test_long = test_long.sort_values(['Time'])
        test_long.index = df.index
        df['GHI'] = test_long['aux']
    return (df)

def PV_output_inclinations(azimuths, inclinations, df, res, phi):
    '''
    Description
    -----------
    This function generates PV outputs for the given azimuths and inclinations.
    df must contain a GHI and a Temperature column. All outputs are written as
    csv files in a folder called PV_Gen; the names follow the nomenclature
    PV_Generation_Gamma_Beta.csv, where beta is the inclination and gamma the azimuth.
    Parameters
    ----------
    df: DataFrame; includes Temperature and GHI
    phi: float; latitude where the panel will be installed
    res: float; temporal resolution
    inclinations: numpy array; inclinations
    azimuths: numpy array; azimuths
    Returns
    ------
    TODO
    ------
    '''
    print('##############################')
    print('PV_output_inclinations')
    i = 0
    for gamma in azimuths:
        for beta in inclinations:
            print(i)
            out = M.inputs(beta=beta, gamma=gamma, df=df, phi=phi, res=res)  # pass the requested resolution through
            print(out)
            df_out = pd.DataFrame(out)
            df_out = df_out.set_index(df.index)
            name_file = path+'PV_Gen/PV_Generation_'+str(gamma)+'_'+str(beta)+'.csv'  # relies on a module-level `path`
            df_out.to_csv(name_file)
            i += 1
    return

def Distribution(path):
    '''
    Description
    -----------
    This function gets the distribution of PV sizes in Germany for sizes smaller
    than 10 kW. The PV size distribution is saved in the Input folder under the
    name PV_size_distribution.csv.
    Parameters
    ----------
    path : string ; path where all the input data is stored.
    Returns
    ------
    TODO
    ------
    Make it more general and extend it to other countries
    '''
    print('##############################')
    print('Distribution')
    df = pd.read_csv(path+'Input/105_devices_utf8.csv', encoding='utf8', sep=';', engine='python', header=3)
    df_sol = df[df.Anlagentyp == 'Solarstrom']
    cap_sol = df_sol['Nennleistung(kWp_el)']
    cap_sol = cap_sol.apply(lambda x: x.replace('.', ''))   # drop thousands separators
    cap_sol = cap_sol.apply(lambda x: x.replace(',', '.'))  # German decimal comma to dot
    cap_sol = cap_sol.astype(float)
    res_sol = cap_sol[cap_sol < 10]
    res_sol = res_sol.reset_index(drop=True)
    res_sol.to_csv(path+'Input/PV_size_distribution.csv')
    return ()

def German_load(path):
    '''
    Description
    -----------
    This function computes the yearly and daily averages of the German load from
    DE_load_15_min_Power.csv and puts them in the Input folder as csv files. The
    source file comes from the paper "Representative electrical load profiles of
    residential buildings in Germany" (Tjaden et al.), with an original temporal
    resolution of one second, reshaped to 15-minute resolution. German demand curves (2010).
    Parameters
    ----------
    path : string ; path where all the input data is stored.
    Returns
    ------
    TODO
    ------
    Make it more general and extend it to other countries
    '''
    print('##############################')
    print('German_load')
    df_15power = pd.read_csv(path+'Input/DE_load_15_min_Power.csv',  # per the docstring, the file lives under Input/
                             index_col=[0], parse_dates=[0], infer_datetime_format=True)
    df_15power.index = df_15power.index.tz_localize('UTC').tz_convert('Europe/Brussels')
    a = (df_15power.mean(axis=1)/4)
    a.to_csv(path+'Input/German_yearly_average_load_curve_kWh.csv')
    b = (a.groupby([a.index.hour, a.index.minute]).mean())
    b.to_csv(path+'Input/German_daily_average_load_curve_kWh.csv')
    return ()

def PV_gen_munich(path):
    '''
    Description
    -----------
    This function reads the outputs in PV_Gen and puts them together in a df
    (normalized @ 1 kW), delivered as a csv in the Input folder called
    DE_gen_15_min_Energy.csv.
    Parameters
    ----------
    path : string ; path where all the input data is stored.
    Returns
    ------
    TODO
    ------
    Make it more general and extend it to other countries
    '''
    print('##############################')
    print('PV_gen_munich')
    path2 = path+'PV_Gen/'  # the folder PV_output_inclinations writes to
    mat = np.array(['Azimuth', 'Inclination', 'PV_output', 'Capacity_factor'])
    for file in os.listdir(path2):
        # collect PV_output, Capacity_factor, Inclination and Azimuth in a table (PV_munich)
        df = pd.read_csv(path2+file,
                         encoding='utf8', sep=',', engine='python', parse_dates=[0],
                         infer_datetime_format=True, index_col=0)
        aux = file.split('_')
        arr = np.array([aux[2], aux[3].split('.')[0], (df.sum()/4/230).values[0], (df.sum()/4/230/(365*24)).values[0]])
        mat = np.vstack((mat, arr))
    PV_munich = pd.DataFrame(mat[1:].astype(float).round(2), columns=mat[0])
    PV_munich = PV_munich.sort_values('PV_output', ascending=False)  # sort_values is not in-place
    result = pd.read_csv(path2+os.listdir(path2)[0], encoding='utf8', sep=',',
                         engine='python', parse_dates=[0], infer_datetime_format=True, index_col=0)
    result.columns = ['PV_'+os.listdir(path2)[0].split('_')[2]+'_'+os.listdir(path2)[0].split('_')[3].split('.')[0]]
    result.index = result.index.tz_localize('UTC').tz_convert('CET')
    for file in os.listdir(path2)[1:]:
        df = pd.read_csv(path2+file, encoding='utf8', sep=',', engine='python',
                         parse_dates=[0], infer_datetime_format=True, index_col=0)
        df.columns = ['PV_'+file.split('_')[2]+'_'+file.split('_')[3].split('.')[0]]
        df.index = df.index.tz_localize('UTC').tz_convert('CET')
        result = pd.concat([result, df], axis=1, join='inner')
    # ### Normalize to 1 kW array
    result = (result/4/230)
    # result = result.drop('Index')
    # result.index = pd.to_datetime(result.index)
    result.to_csv(path+'Input/DE_gen_15_min_Energy.csv')
...
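
In this file test_long is not a test at all: it is a pandas Series, the long-format result of stacking a forward-filled date x time-of-day pivot table, which is how PV_generation patches gaps in the temperature and GHI series. Here is a self-contained sketch of that idiom with synthetic data; the variable names mirror the snippet, but nothing below comes from the project's actual inputs.

# Synthetic illustration of the pivot / ffill / stack gap-filling idiom.
import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=4 * 96, freq='15min')  # four days at 15-min resolution
df = pd.DataFrame({'Temperature': np.random.rand(len(idx))}, index=idx)
df.iloc[200:210, 0] = np.nan  # simulate a gap of missing readings

df['Date'] = df.index.date
df['TimeOnly'] = df.index.time

# date x time-of-day table: each gap is filled from the same slot on the previous day
test_pivot = df.pivot_table(values='Temperature', columns='TimeOnly', index='Date')
test_filled = test_pivot.ffill()
test_long = test_filled.stack()   # back to one value per (date, time), in chronological order
test_long.index = df.index        # restore the original DatetimeIndex
df['Temperature'] = test_long

assert not df['Temperature'].isnull().any()

The design choice is worth noting: forward-filling the pivoted table copies the value from the same time of day on the previous day, which preserves the daily temperature and irradiance cycle far better than a plain ffill along the raw time axis would.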


test_long_stdlib.py

Source: test_long_stdlib.py (GitHub)


# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.

##
## Run selected tests from test_long from StdLib
##

import unittest
import sys

from iptest import run_test

import test.test_long

def load_tests(loader, standard_tests, pattern):
    if sys.implementation.name == 'ironpython':
        suite = unittest.TestSuite()
        suite.addTest(test.test_long.LongTest('test__format__'))
        suite.addTest(test.test_long.LongTest('test_access_to_nonexistent_digit_0'))
        suite.addTest(test.test_long.LongTest('test_bit_length'))
        suite.addTest(test.test_long.LongTest('test_bitop_identities'))
        suite.addTest(test.test_long.LongTest('test_conversion'))
        suite.addTest(unittest.expectedFailure(test.test_long.LongTest('test_correctly_rounded_true_division'))) # https://github.com/IronLanguages/ironpython3/issues/907
        suite.addTest(test.test_long.LongTest('test_division'))
        suite.addTest(unittest.expectedFailure(test.test_long.LongTest('test_float_conversion'))) # https://github.com/IronLanguages/ironpython3/issues/907
        suite.addTest(test.test_long.LongTest('test_float_overflow'))
        suite.addTest(test.test_long.LongTest('test_format'))
        suite.addTest(test.test_long.LongTest('test_from_bytes'))
        suite.addTest(test.test_long.LongTest('test_karatsuba'))
        suite.addTest(test.test_long.LongTest('test_logs'))
        suite.addTest(test.test_long.LongTest('test_long'))
        suite.addTest(test.test_long.LongTest('test_mixed_compares'))
        suite.addTest(test.test_long.LongTest('test_nan_inf'))
        suite.addTest(test.test_long.LongTest('test_round'))
        suite.addTest(test.test_long.LongTest('test_shift_bool'))
        suite.addTest(unittest.expectedFailure(test.test_long.LongTest('test_small_ints'))) # https://github.com/IronLanguages/ironpython3/issues/975
        suite.addTest(test.test_long.LongTest('test_to_bytes'))
        suite.addTest(unittest.expectedFailure(test.test_long.LongTest('test_true_division'))) # https://github.com/IronLanguages/ironpython3/issues/907
        return suite
    else:
        return loader.loadTestsFromModule(test.test_long, pattern)
...
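
This file curates which LongTest methods run on IronPython through unittest's load_tests protocol: when discovery finds a module-level load_tests(loader, standard_tests, pattern), it calls it and runs the returned suite instead of everything in the module. Below is a minimal sketch of the same protocol; the module name my_suite.py is hypothetical, and while green is expected to honor load_tests the same way the stdlib runner does, that is an assumption worth verifying on your green version.

# my_suite.py (hypothetical): hand-pick tests via the load_tests protocol
import unittest

class LongTest(unittest.TestCase):
    def test_long(self):
        # a stand-in large-integer check
        self.assertEqual((1 << 100) // (1 << 50), 1 << 50)

    def test_not_selected(self):
        self.fail("never added to the suite, so never run")

def load_tests(loader, standard_tests, pattern):
    # called by the discovery machinery; return only the tests we want
    suite = unittest.TestSuite()
    suite.addTest(LongTest('test_long'))
    return suite

if __name__ == '__main__':
    unittest.main()  # runs only test_long, because load_tests filters the suite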


