How to use support_file method in Nose2

Best Python code snippets using nose2
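In the snippets below, `support_file` is not part of the nose2 API itself; it is a small project-level test helper (for example, `utils.support_file` in the conda test suite shown later) that resolves a fixture name against the suite's support-data directory. A minimal sketch of such a helper, assuming your fixtures live in a `support/` folder next to the test module (both the directory name and the helper name are illustrative, not a nose2 built-in):

import os

# Directory holding test fixture data; 'support' is an assumed layout.
SUPPORT_DIR = os.path.join(os.path.dirname(__file__), 'support')

def support_file(*parts):
    """Return the absolute path of a fixture file under the support/ directory."""
    return os.path.join(SUPPORT_DIR, *parts)

A test can then open `support_file('simple.yml')` and get the same file no matter which working directory the test runner uses.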

parsers.py

Source: parsers.py (GitHub)


import copy
from . import easygdf
import time
import numpy as np
import re
import os
from gpt.tools import full_path
import shutil


# ------ Number parsing ------
def isfloat(value):
    try:
        float(value)
        return True
    except ValueError:
        return False


def find_path(line, pattern=r'"([^"]+\.gdf)"'):
    matches = re.findall(pattern, line)
    return matches


def set_support_files_orig(lines,
                           original_path,
                           target_path='',
                           copy_files=False,
                           pattern=r'"([^"]+\.gdf)"',
                           verbose=False):
    print('set SUPPORT ORIG')

    for ii, line in enumerate(lines):
        support_files = find_path(line, pattern=pattern)

        for support_file in support_files:
            #print(full_path(support_file))
            abs_original_path = full_path(os.path.join(original_path, os.path.expandvars(support_file)))

            if copy_files:
                abs_target_path = os.path.join(target_path, support_file)
                shutil.copyfile(abs_original_path, abs_target_path, follow_symlinks=True)
                if verbose:
                    print("Copying file: ", abs_original_path, '->', abs_target_path)
            else:
                if os.path.isfile(abs_original_path):
                    lines[ii] = line.replace(support_file, abs_original_path)
                    if verbose:
                        print("Set path to file: ", lines[ii])


def set_support_files(lines,
                      original_path,
                      target='',
                      copy_files=False,
                      pattern=r'"([^"]+\.gdf)"',
                      verbose=False):

    for ii, line in enumerate(lines):
        support_files = find_path(line, pattern=pattern)

        for support_file in support_files:
            abs_original_path = full_path(os.path.join(original_path, os.path.expandvars(support_file)))
            #print(support_file, os.path.join(original_path, os.path.expandvars(support_file)), line)

            if copy_files:
                # Bug fix: the original referenced the undefined name `target_path`
                # here; this function's parameter is `target`.
                abs_target_path = os.path.join(target, support_file)
                shutil.copyfile(abs_original_path, abs_target_path, follow_symlinks=True)
                if verbose:
                    print("Copying file: ", abs_original_path, '->', abs_target_path)
            else:
                if os.path.isfile(abs_original_path):
                    #print(target, abs_original_path)
                    dest = full_path(os.path.join(target, os.path.basename(abs_original_path)))
                    # Make symlink
                    # Replace old symlinks.
                    if os.path.islink(dest):
                        os.unlink(dest)
                    elif os.path.exists(dest):
                        if verbose:
                            print(dest, 'exists, will not symlink')
                        continue
                    # Note that the following will raise an error if the dest is an actual file that exists
                    os.symlink(abs_original_path, dest)
                    if verbose:
                        print('Linked', abs_original_path, 'to', os.path.basename(dest))
                    lines[ii] = line.replace(support_file, os.path.basename(dest))


def parse_gpt_input_file(filePath, condense=False, verbose=False):
    """
    Parses GPT input file
    """
    finput = {}

    with open(filePath, 'r') as f:
        clean_lines = []
        # Get lines without comments
        for line in f:
            tokens = line.strip().split('#')
            if len(tokens[0]) > 0:
                clean_line = tokens[0].strip().replace('\n', '')
                clean_lines.append(clean_line)

    variables = {}
    for ii, line in enumerate(clean_lines):
        tokens = line.split("=")
        if len(tokens) == 2 and isfloat(tokens[1][:-1].strip()):
            name = tokens[0].strip()
            value = float(tokens[1][:-1].strip())

            if name not in variables.keys():
                variables[name] = value
            elif verbose:
                print(f'Warning: multiple definitions of variable {name} on line {ii}.')

    support_files = {}
    for ii, line in enumerate(clean_lines):
        for sfile in find_path(line):
            # Bug fix: the original tested `sfile not in support_files`, which
            # checks the dict's integer keys; the intent is to skip duplicate files.
            if sfile not in support_files.values():
                support_files[ii] = sfile

    finput['lines'] = clean_lines
    finput['variables'] = variables
    finput['support_files'] = support_files
    return finput


def write_gpt_input_file(finput, inputFile, ccs_beg='wcs'):
    #print(inputFile)
    for var in finput['variables'].keys():
        value = finput['variables'][var]
        for index, line in enumerate(finput['lines']):
            tokens = line.split('=')
            if len(tokens) == 2 and tokens[0].strip() == var:
                finput["lines"][index] = f'{var}={value};'
                break

    with open(inputFile, 'w') as f:
        for line in finput["lines"]:
            f.write(line + "\n")
        if ccs_beg != "wcs":
            f.write(f'settransform("{ccs_beg}", 0,0,0, 1,0,0, 0,1,0, "beam");\n')


def read_particle_gdf_file(gdffile, verbose=0.0, extra_screen_keys=['q', 'nmacro'], load_files=False):  # ,'ID', 'm']):
    with open(gdffile, 'rb') as f:
        data = easygdf.load_initial_distribution(f, extra_screen_keys=extra_screen_keys)

    screen = {}
    n = len(data[0, :])
    if n > 0:
        q = data[7, :]       # elemental charge/macroparticle
        nmacro = data[8, :]  # number of elemental charges/macroparticle

        weights = np.abs(data[7, :]*data[8, :])/np.sum(np.abs(data[7, :]*data[8, :]))

        screen = {"x": data[0, :], "GBx": data[1, :],
                  "y": data[2, :], "GBy": data[3, :],
                  "z": data[4, :], "GBz": data[5, :],
                  "t": data[6, :],
                  "q": data[7, :],
                  "nmacro": data[8, :],
                  "w": weights,
                  "G": np.sqrt(data[1, :]*data[1, :] + data[3, :]*data[3, :] + data[5, :]*data[5, :] + 1)}

        #screen["Bx"]=screen["GBx"]/screen["G"]
        #screen["By"]=screen["GBy"]/screen["G"]
        #screen["Bz"]=screen["GBz"]/screen["G"]
        screen["time"] = np.sum(screen["w"]*screen["t"])
        screen["n"] = n
    return screen


def read_gdf_file(gdffile, verbose=False, load_fields=False):
    # Read in file:
    #self.vprint("Current file: '"+data_file+"'",1,True)
    #self.vprint("Reading data...",1,False)
    t1 = time.time()
    with open(gdffile, 'rb') as f:
        if load_fields:
            extra_tout_keys = ['q', 'nmacro', 'ID', 'm', 'fEx', 'fEy', 'fEz', 'fBx', 'fBy', 'fBz']
        else:
            extra_tout_keys = ['q', 'nmacro', 'ID', 'm']

        touts, screens = easygdf.load(f, extra_screen_keys=['q', 'nmacro', 'ID', 'm'], extra_tout_keys=extra_tout_keys)

    t2 = time.time()
    if verbose:
        print(f'   GDF data loaded, time elapsed: {t2-t1:G} (sec).')

    #self.vprint("Saving wcs tout and ccs screen data structures...",1,False)
    tdata, fields = make_tout_dict(touts, load_fields=load_fields)
    pdata = make_screen_dict(screens)
    return (tdata, pdata, fields)


def make_tout_dict(touts, load_fields=False):
    tdata = []
    fields = []
    count = 0
    for data in touts:
        n = len(data[0, :])

        if n > 0:
            q = data[7, :]       # elemental charge/macroparticle
            nmacro = data[8, :]  # number of elemental charges/macroparticle

            if np.sum(q) == 0 or np.sum(nmacro) == 0:
                weights = data[10, :]/np.sum(data[10, :])  # Use the mass if no charge is specified
            else:
                weights = np.abs(data[7, :]*data[8, :])/np.sum(np.abs(data[7, :]*data[8, :]))

            tout = {"x": data[0, :], "GBx": data[1, :],
                    "y": data[2, :], "GBy": data[3, :],
                    "z": data[4, :], "GBz": data[5, :],
                    "t": data[6, :],
                    "q": data[7, :],
                    "nmacro": data[8, :],
                    "ID": data[9, :],
                    "m": data[10, :],
                    "w": weights,
                    "G": np.sqrt(data[1, :]*data[1, :] + data[3, :]*data[3, :] + data[5, :]*data[5, :] + 1)}

            #tout["Bx"]=tout["GBx"]/tout["G"]
            #tout["By"]=tout["GBy"]/tout["G"]
            #tout["Bz"]=tout["GBz"]/tout["G"]
            tout["time"] = np.sum(tout["w"]*tout["t"])
            tout["n"] = len(tout["x"])
            tout["number"] = count

            if load_fields:
                field = {'Ex': data[11, :], 'Ey': data[12, :], 'Ez': data[13, :],
                         'Bx': data[14, :], 'By': data[15, :], 'Bz': data[16, :]}
            else:
                field = None

            fields.append(field)
            count = count + 1
            tdata.append(tout)
    return tdata, fields


def make_screen_dict(screens):
    pdata = []

    count = 0
    for data in screens:
        n = len(data[0, :])
        if n > 0:
            q = data[7, :]       # elemental charge/macroparticle
            nmacro = data[8, :]  # number of elemental charges/macroparticle

            if np.sum(q) == 0 or np.sum(nmacro) == 0:
                weights = data[10, :]/np.sum(data[10, :])  # Use the mass if no charge is specified
            else:
                weights = np.abs(data[7, :]*data[8, :])/np.sum(np.abs(data[7, :]*data[8, :]))

            screen = {"x": data[0, :], "GBx": data[1, :],
                      "y": data[2, :], "GBy": data[3, :],
                      "z": data[4, :], "GBz": data[5, :],
                      "t": data[6, :],
                      "q": data[7, :],
                      "nmacro": data[8, :],
                      "ID": data[9, :],
                      "m": data[10, :],
                      "w": weights,
                      "G": np.sqrt(data[1, :]*data[1, :] + data[3, :]*data[3, :] + data[5, :]*data[5, :] + 1)}

            #screen["Bx"]=screen["GBx"]/screen["G"]
            #screen["By"]=screen["GBy"]/screen["G"]
            #screen["Bz"]=screen["GBz"]/screen["G"]
            screen["time"] = np.sum(screen["w"]*screen["t"])
            screen["n"] = n
            screen["number"] = count
            count = count + 1
            pdata.append(screen)

    t2 = time.time()
    #self.vprint("done. Time elapsed: "+self.ptime(t1,t2)+".",0,True)
    ts = np.array([screen['time'] for screen in pdata])
    sorted_indices = np.argsort(ts)
    return [pdata[sii] for sii in sorted_indices]


def parse_gpt_string(line):
    return re.findall(r'\"(.+?)\"', line)


def replace_gpt_string(line, oldstr, newstr):
    strs = parse_gpt_string(line)
    assert oldstr in strs, 'Could not find string ' + oldstr + ' for string replacement.'
...
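A minimal driver showing how the functions above compose: parse a GPT input file, localize its .gdf support files into a run directory, and write the result back out. This is a sketch only; it requires the `gpt` and `easygdf` packages the module imports, and the file names 'gpt.in' and 'run/' are made up:

# Hypothetical usage of the parsers above.
finput = parse_gpt_input_file('gpt.in')
print(finput['variables'])      # e.g. {'gun_voltage': 300.0}
print(finput['support_files'])  # {line_index: 'fieldmap.gdf', ...}

# Symlink each referenced .gdf file into run/ and rewrite the
# matching input lines to point at the link.
set_support_files(finput['lines'], original_path='.', target='run')
write_gpt_input_file(finput, 'run/gpt.in')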


main.py

Source: main.py (GitHub)


import argparse
import sys
import os
import zipfile
import constatnts  # [sic] -- module name as spelled in the source repository
from record import Record
from analytics.data import Data
import json
from shutil import copy

# output directory
output = ''


def create_arg_parser():
    parser = argparse.ArgumentParser(description='Analyzes results from TPM testing.')
    parser.add_argument('input', help='Path to the results directory.')
    parser.add_argument('--output', help='Path to the output folder, will create files on its own')
    parser.add_argument('-d', default=False,
                        dest='check_for_unzips',
                        action='store_true',  # bug fix: without this, -d would demand a value
                        help='Will not extract an archive if already extracted (by name)')
    return parser


def scan_directory(input_directory, check_for_unzips):
    class FolderExistsException(Exception):
        pass

    rcrds = []
    # unzip
    index = 1
    for line in os.listdir(input_directory):
        file_path = input_directory + '/' + line
        rcrd = Record(file_path)
        if not line.startswith("."):
            if zipfile.is_zipfile(file_path):
                with zipfile.ZipFile(file_path, "r") as zip_ref:
                    try:
                        folder_path = os.path.splitext(file_path)[0]
                        folder_index = 0
                        while os.path.exists(folder_path if folder_index == 0 else folder_path + '_' + str(folder_index)):
                            if check_for_unzips:
                                raise FolderExistsException
                            folder_index += 1
                        if folder_index > 0:
                            rcrd.set_flag(constatnts.RECORD_FLAG_NON_UNIQUE_FOLDER_NAME, True)
                            folder_path += ('_' + str(folder_index))
                        zip_ref.extractall(folder_path)
                        setattr(rcrd, 'path', folder_path)
                    except (FolderExistsException, zipfile.BadZipfile) as ex:
                        continue
            else:
                if line.endswith('.zip'):
                    continue
            setattr(rcrd, 'index', index)
            index += 1
            rcrds.append(rcrd)
    return rcrds


def sort_lists():
    Data.global_supported_algorithms.sort(key=lambda h: int(h, 0))
    Data.global_supported_commands.sort(key=lambda h: int(h, 0))
    Data.global_supported_ecc.sort(key=lambda h: int(h, 0))


def sort_results(cols):
    move_back = []
    cols.sort(key=sort_by_vendor)
    for index, result in enumerate(cols):
        for i, dataset in enumerate(result['dataset']):
            if 'no_tpm' in dataset and i == 0:
                if dataset['no_tpm']:
                    move_back.append(index)
                break
    for x, index in enumerate(move_back):
        cols.append(cols.pop(index - x))
    for index, result in enumerate(cols):
        result['id'] = index
        name = ''
        if result['dataset'][0]['manufacturer']:
            name += result['dataset'][0]['manufacturer']
        if result['dataset'][0]['firmware']:
            name += ' ' + result['dataset'][0]['firmware']
        if result['dataset'][0]['vendor']:
            name += ' ' + result['dataset'][0]['vendor']
        if name:
            result['name'] = name


def sort_by_vendor(e):
    manufacturer = None
    fw = None
    if 'manufacturer' in e['dataset'][0]:
        manufacturer = e['dataset'][0]['manufacturer']
    if 'firmware' in e['dataset'][0]:
        fw = e['dataset'][0]['firmware']
    return [manufacturer, fw]


def calculate_meta(cols):
    meta = {
        'total': len(cols),
        'total_tpm': 0,
        'supported_algorithms': {},
        'supported_commands': {},
        'supported_ecc': {}
    }
    for column in cols:
        if not column['dataset'][0]['no_tpm']:
            meta['total_tpm'] += 1
        for algorithm in column['dataset'][0]['supported_algorithms']:
            if algorithm in meta['supported_algorithms']:
                meta['supported_algorithms'][algorithm] += 1
            else:
                meta['supported_algorithms'][algorithm] = 1
        for command in column['dataset'][0]['supported_commands']:
            if command in meta['supported_commands']:
                meta['supported_commands'][command] += 1
            else:
                meta['supported_commands'][command] = 1
        for ecc in column['dataset'][0]['supported_ecc']:
            if ecc in meta['supported_ecc']:
                meta['supported_ecc'][ecc] += 1
            else:
                meta['supported_ecc'][ecc] = 1
    return meta


def get_revisions():
    revs = {
        'algorithms': {},
        'commands': {},
        'ecc': {}
    }
    for algorithm in constatnts.supported_algorithms:
        revs['algorithms'][algorithm] = alg_revision(constatnts.supported_algorithms[algorithm])
    for command in constatnts.supported_commands:
        revs['commands'][command] = com_revision(constatnts.supported_commands[command])
    for ec in constatnts.supported_ecc:
        revs['ecc'][ec] = ecc_revision(constatnts.supported_ecc[ec])
    return revs


def alg_revision(name):
    if name in ['TPM_ALG_CAMELLIA']:
        return '1.22'
    if name in ['TPM_ALG_TDES', 'TPM_ALG_SHA3_256', 'TPM_ALG_SHA3_384', 'TPM_ALG_SHA3_512']:
        return '1.24'
    if name in ['TPM_ALG_CMAC']:
        return '1.27'
    if name in ['TPM_ALG_CCM', 'TPM_ALG_GCM', 'TPM_ALG_KW', 'TPM_ALG_KWP', 'TPM_ALG_EAX', 'TPM_ALG_EDDSA']:
        return '1.32'
    if name in []:
        return 'unknown'
    return '1.15'


def com_revision(name):
    if name in ['CC_PolicyNvWritten']:
        return '0.99'
    if name in []:
        return '1.16'
    if name in ['CC_PolicyTemplate', 'CC_CreateLoaded', 'CC_PolicyAuthorizeNV', 'CC_Vendor_TCG_Test', 'CC_EncryptDecrypt2']:
        return '1.38'
    if name in ['CC_AC_GetCapability', 'CC_AC_Send', 'CC_Policy_AC_SendSelect']:
        return '1.59'
    if name in []:
        return 'unknown'
    return '0.96'


def ecc_revision(name):
    if name in []:
        return '1.22'
    if name in []:
        return '1.24'
    if name in []:
        return '1.27'
    if name in ['TPM_ECC_BP_P256_R1', 'TPM_ECC_BP_P384_R1', 'TPM_ECC_BP_P512_R1', 'TPM_ECC_CURVE_25519']:
        return '1.32'
    if name in []:
        return 'unknown'
    return '1.15'


def create_supporting_files(cols, out):
    # unused code, but it does generate some general statistics
    stats = {
        'manufacturer': {},
        'firmware': {},
        'revision': {},
        'family': {}
    }
    for col in cols:
        if col['dataset'][0]['manufacturer'] not in stats['manufacturer']:
            stats['manufacturer'][col['dataset'][0]['manufacturer']] = 1
        else:
            stats['manufacturer'][col['dataset'][0]['manufacturer']] += 1
        if col['dataset'][0]['manufacturer'] not in stats['firmware']:
            stats['firmware'][col['dataset'][0]['manufacturer']] = {}
        if col['dataset'][0]['firmware'] not in stats['firmware'][col['dataset'][0]['manufacturer']]:
            stats['firmware'][col['dataset'][0]['manufacturer']][col['dataset'][0]['firmware']] = 1
        else:
            stats['firmware'][col['dataset'][0]['manufacturer']][col['dataset'][0]['firmware']] += 1
        if 'TPMx_PT_REVISION' in col['dataset'][0]['properties_fixed']:
            if col['dataset'][0]['properties_fixed']['TPMx_PT_REVISION'] not in stats['revision']:
                stats['revision'][col['dataset'][0]['properties_fixed']['TPMx_PT_REVISION']] = 1
            else:
                stats['revision'][col['dataset'][0]['properties_fixed']['TPMx_PT_REVISION']] += 1
        if 'TPMx_PT_FAMILY_INDICATOR' in col['dataset'][0]['properties_fixed']:
            if col['dataset'][0]['properties_fixed']['TPMx_PT_FAMILY_INDICATOR'] not in stats['family']:
                stats['family'][col['dataset'][0]['properties_fixed']['TPMx_PT_FAMILY_INDICATOR']] = 1
            else:
                stats['family'][col['dataset'][0]['properties_fixed']['TPMx_PT_FAMILY_INDICATOR']] += 1
    with open(os.path.join(out, 'data.json'), 'w') as support_file:
        support_file.write(json.dumps(cols))
    with open(os.path.join(out, 'meta.json'), 'w') as support_file:
        support_file.write(json.dumps(calculate_meta(cols)))
    with open(os.path.join(out, 'revision.json'), 'w') as support_file:
        support_file.write(json.dumps(get_revisions()))
    lists = os.path.join(out, 'lists')
    os.makedirs(lists, exist_ok=True)
    with open(os.path.join(lists, 'supported_algorithms.json'), 'w') as support_file:
        support_file.write(json.dumps(Data.global_supported_algorithms))
    with open(os.path.join(lists, 'supported_commands.json'), 'w') as support_file:
        support_file.write(json.dumps(Data.global_supported_commands))
    with open(os.path.join(lists, 'supported_ecc.json'), 'w') as support_file:
        support_file.write(json.dumps(Data.global_supported_ecc))
    with open(os.path.join(lists, 'fixed_properties.json'), 'w') as support_file:
        support_file.write(json.dumps(Data.global_properties_fixed))
    with open(os.path.join(lists, 'performance_metrics.json'), 'w') as support_file:
        support_file.write(json.dumps(Data.global_performance))
    dictionary = os.path.join(out, 'dictionary')
    os.makedirs(dictionary, exist_ok=True)
    with open(os.path.join(dictionary, 'algorithms.json'), 'w') as support_file:
        support_file.write(json.dumps(constatnts.supported_algorithms))
    with open(os.path.join(dictionary, 'commands.json'), 'w') as support_file:
        support_file.write(json.dumps(constatnts.supported_commands))
    with open(os.path.join(dictionary, 'ecc.json'), 'w') as support_file:
        support_file.write(json.dumps(constatnts.supported_ecc))
    copy('distributable/index.html', out)
    copy('distributable/script.js', out)
    copy('distributable/styles.css', out)


if __name__ == "__main__":
    arg_parser = create_arg_parser()
    parsed_args = arg_parser.parse_args(sys.argv[1:])
    if os.path.exists(parsed_args.input):
        output = parsed_args.output
        if not os.path.exists(output):
            try:
                os.makedirs(output)
            except OSError:
                print("Creation of the directory %s failed" % output)
                exit(1)
        records = scan_directory(parsed_args.input, parsed_args.check_for_unzips)
        columns = []
        for record in records:
            record.find_type()
            record.get_meta()
            record.get_results()
            record.get_performance()
            columns.append(record.get_col())
        # sort lists
        sort_lists()
        sort_results(columns)
        create_supporting_files(columns, output)
    else:
        print("Input directory does not exist.")
...
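The support-file output in `create_supporting_files` is simply `json.dumps` written into a fixed directory layout. A self-contained sketch of that same pattern, with illustrative names (`write_support_files` is not part of the source above):

import json
import os
import tempfile

def write_support_files(out, cols, meta):
    """Write aggregated data as JSON 'support files' for a static viewer."""
    os.makedirs(os.path.join(out, 'lists'), exist_ok=True)
    with open(os.path.join(out, 'data.json'), 'w') as support_file:
        support_file.write(json.dumps(cols))
    with open(os.path.join(out, 'meta.json'), 'w') as support_file:
        support_file.write(json.dumps(meta))

out = tempfile.mkdtemp()
write_support_files(out, cols=[{'id': 0}], meta={'total': 1})
print(sorted(os.listdir(out)))  # ['data.json', 'lists', 'meta.json']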


test_env.py

Source: test_env.py (GitHub)


...
        self.output = ''

    def write(self, chunk):
        self.output += chunk.decode('utf-8')


def get_simple_environment():
    return env.from_file(utils.support_file('simple.yml'))


class from_file_TestCase(unittest.TestCase):
    def test_returns_Environment(self):
        e = get_simple_environment()
        self.assertIsInstance(e, env.Environment)

    def test_retains_full_filename(self):
        e = get_simple_environment()
        self.assertEqual(utils.support_file('simple.yml'), e.filename)

    def test_with_pip(self):
        e = env.from_file(utils.support_file('with-pip.yml'))
        self.assert_('pip' in e.dependencies)
        self.assert_('foo' in e.dependencies['pip'])
        self.assert_('baz' in e.dependencies['pip'])


class EnvironmentTestCase(unittest.TestCase):
    def test_has_empty_filename_by_default(self):
        e = env.Environment()
        self.assertEqual(e.filename, None)

    def test_has_filename_if_provided(self):
        r = random.randint(100, 200)
        random_filename = '/path/to/random/environment-{}.yml'.format(r)
        e = env.Environment(filename=random_filename)
        self.assertEqual(e.filename, random_filename)

    def test_has_empty_name_by_default(self):
        e = env.Environment()
        self.assertEqual(e.name, None)

    def test_has_name_if_provided(self):
        random_name = 'random-{}'.format(random.randint(100, 200))
        e = env.Environment(name=random_name)
        self.assertEqual(e.name, random_name)

    def test_dependencies_are_empty_by_default(self):
        e = env.Environment()
        self.assertEqual(0, len(e.dependencies))

    def test_parses_dependencies_from_raw_file(self):
        e = get_simple_environment()
        expected = OrderedDict([('conda', ['nltk'])])
        self.assertEqual(e.dependencies, expected)

    def test_builds_spec_from_line_raw_dependency(self):
        # TODO Refactor this inside conda to not be a raw string
        e = env.Environment(dependencies=['nltk=3.0.0=np18py27'])
        expected = OrderedDict([('conda', ['nltk 3.0.0 np18py27'])])
        self.assertEqual(e.dependencies, expected)

    def test_args_are_wildcarded(self):
        e = env.Environment(dependencies=['python=2.7'])
        expected = OrderedDict([('conda', ['python 2.7*'])])
        self.assertEqual(e.dependencies, expected)

    def test_other_tips_of_dependencies_are_supported(self):
        e = env.Environment(
            dependencies=['nltk', {'pip': ['foo', 'bar']}]
        )
        expected = OrderedDict([
            ('conda', ['nltk']),
            ('pip', ['foo', 'bar'])
        ])
        self.assertEqual(e.dependencies, expected)

    def test_channels_default_to_empty_list(self):
        e = env.Environment()
        self.assertIsInstance(e.channels, list)
        self.assertEqual(e.channels, [])

    def test_add_channels(self):
        e = env.Environment()
        e.add_channels(['dup', 'dup', 'unique'])
        self.assertEqual(e.channels, ['dup', 'unique'])

    def test_remove_channels(self):
        e = env.Environment(channels=['channel'])
        e.remove_channels()
        self.assertEqual(e.channels, [])

    def test_channels_are_provided_by_kwarg(self):
        random_channels = (random.randint(100, 200), random)
        e = env.Environment(channels=random_channels)
        self.assertEqual(e.channels, random_channels)

    def test_to_dict_returns_dictionary_of_data(self):
        random_name = 'random{}'.format(random.randint(100, 200))
        e = env.Environment(
            name=random_name,
            channels=['javascript'],
            dependencies=['nodejs']
        )
        expected = {
            'name': random_name,
            'channels': ['javascript'],
            'dependencies': ['nodejs']
        }
        self.assertEqual(e.to_dict(), expected)

    def test_to_dict_returns_just_name_if_only_thing_present(self):
        e = env.Environment(name='simple')
        expected = {'name': 'simple'}
        self.assertEqual(e.to_dict(), expected)

    def test_to_yaml_returns_yaml_parseable_string(self):
        random_name = 'random{}'.format(random.randint(100, 200))
        e = env.Environment(
            name=random_name,
            channels=['javascript'],
            dependencies=['nodejs']
        )
        expected = {
            'name': random_name,
            'channels': ['javascript'],
            'dependencies': ['nodejs']
        }
        actual = yaml.load(StringIO(e.to_yaml()))
        self.assertEqual(expected, actual)

    def test_to_yaml_returns_proper_yaml(self):
        random_name = 'random{}'.format(random.randint(100, 200))
        e = env.Environment(
            name=random_name,
            channels=['javascript'],
            dependencies=['nodejs']
        )
        expected = '\n'.join([
            "name: %s" % random_name,
            "channels:",
            "- javascript",
            "dependencies:",
            "- nodejs",
            ""
        ])
        actual = e.to_yaml()
        self.assertEqual(expected, actual)

    def test_to_yaml_takes_stream(self):
        random_name = 'random{}'.format(random.randint(100, 200))
        e = env.Environment(
            name=random_name,
            channels=['javascript'],
            dependencies=['nodejs']
        )
        s = FakeStream()
        e.to_yaml(stream=s)
        expected = "\n".join([
            'name: %s' % random_name,
            'channels:',
            '- javascript',
            'dependencies:',
            '- nodejs',
            '',
        ])
        self.assertEqual(expected, s.output)

    def test_can_add_dependencies_to_environment(self):
        e = get_simple_environment()
        e.dependencies.add('bar')
        s = FakeStream()
        e.to_yaml(stream=s)
        expected = "\n".join([
            'name: nlp',
            'dependencies:',
            '- nltk',
            '- bar',
            ''
        ])
        self.assertEqual(expected, s.output)

    def test_dependencies_update_after_adding(self):
        e = get_simple_environment()
        self.assert_('bar' not in e.dependencies['conda'])
        e.dependencies.add('bar')
        self.assert_('bar' in e.dependencies['conda'])


class DirectoryTestCase(unittest.TestCase):
    directory = utils.support_file('example')

    def setUp(self):
        self.original_working_dir = os.getcwd()
        self.env = env.load_from_directory(self.directory)

    def tearDown(self):
        os.chdir(self.original_working_dir)

    def test_returns_env_object(self):
        self.assertIsInstance(self.env, env.Environment)

    def test_has_expected_name(self):
        self.assertEqual('test', self.env.name)

    def test_has_dependencies(self):
        self.assertEqual(1, len(self.env.dependencies['conda']))
        self.assert_('numpy' in self.env.dependencies['conda'])


class load_from_directory_example_TestCase(DirectoryTestCase):
    directory = utils.support_file('example')


class load_from_directory_example_yaml_TestCase(DirectoryTestCase):
    directory = utils.support_file('example-yaml')


class load_from_directory_recursive_TestCase(DirectoryTestCase):
    directory = utils.support_file('foo/bar')


class load_from_directory_recursive_two_TestCase(DirectoryTestCase):
    directory = utils.support_file('foo/bar/baz')


class load_from_directory_trailing_slash_TestCase(DirectoryTestCase):
    directory = utils.support_file('foo/bar/baz/')


class load_from_directory_TestCase(unittest.TestCase):
    def test_raises_when_unable_to_find(self):
        with self.assertRaises(exceptions.EnvironmentFileNotFound):
            env.load_from_directory('/path/to/unknown/env-spec')

    def test_raised_exception_has_environment_yml_as_file(self):
        with self.assertRaises(exceptions.EnvironmentFileNotFound) as e:
            env.load_from_directory('/path/to/unknown/env-spec')
        self.assertEqual(e.exception.filename, 'environment.yml')


class LoadEnvFromFileAndSaveTestCase(unittest.TestCase):
    env_path = utils.support_file(os.path.join('saved-env', 'environment.yml'))

    def setUp(self):
        with open(self.env_path, "rb") as fp:
            self.original_file_contents = fp.read()
        self.env = env.load_from_directory(self.env_path)

    def tearDown(self):
        with open(self.env_path, "wb") as fp:
            fp.write(self.original_file_contents)

    def test_expected_default_conditions(self):
        self.assertEqual(1, len(self.env.dependencies['conda']))

    def test(self):
        self.env.dependencies.add('numpy')
        self.env.save()
        e = env.load_from_directory(self.env_path)
        self.assertEqual(2, len(e.dependencies['conda']))
        self.assert_('numpy' in e.dependencies['conda'])


class EnvironmentSaveTestCase(unittest.TestCase):
    env_file = utils.support_file('saved.yml')

    def tearDown(self):
        if os.path.exists(self.env_file):
            os.unlink(self.env_file)

    def test_creates_file_on_save(self):
        self.assertFalse(os.path.exists(self.env_file), msg='sanity check')
        e = env.Environment(filename=self.env_file, name='simple')
        e.save()
        self.assertTrue(os.path.exists(self.env_file))

    def _test_saves_yaml_representation_of_file(self):
        e = env.Environment(filename=self.env_file, name='simple')
        e.save()
        with open(self.env_file, "rb") as fp:
            actual = fp.read()
        self.assert_(len(actual) > 0, msg='sanity check')
...
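`LoadEnvFromFileAndSaveTestCase` above guards a mutable support file by snapshotting its bytes in `setUp` and restoring them in `tearDown`, so tests can mutate the fixture without contaminating later runs. The same pattern in isolation, as a runnable sketch (the fixture path is illustrative, not conda's `utils.support_file`):

import os
import tempfile
import unittest

class RestoreFixtureTestCase(unittest.TestCase):
    # Illustrative stand-in for a support_file('saved-env/environment.yml') path.
    fixture = os.path.join(tempfile.gettempdir(), 'environment.yml')

    def setUp(self):
        with open(self.fixture, 'wb') as fp:
            fp.write(b'name: test\n')
        with open(self.fixture, 'rb') as fp:
            self.original = fp.read()   # snapshot before the test mutates it

    def tearDown(self):
        with open(self.fixture, 'wb') as fp:
            fp.write(self.original)     # restore, so other tests see pristine data

    def test_mutation_is_reverted(self):
        with open(self.fixture, 'ab') as fp:
            fp.write(b'dependencies: [numpy]\n')

if __name__ == '__main__':
    unittest.main()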


test_install_app_support_package.py

Source: test_install_app_support_package.py (GitHub)


import os
import zipfile
from unittest import mock

import pytest
from requests import exceptions as requests_exceptions

from briefcase.commands.create import InvalidSupportPackage
from briefcase.exceptions import NetworkFailure


def test_install_app_support_package(create_command, myapp, tmp_path, support_path):
    "A support package can be downloaded and unpacked where it is needed"
    # Write a temporary support zip file
    support_file = tmp_path / 'out.zip'
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Install the support package
    create_command.install_app_support_package(myapp)

    # Confirm the right URL was used
    create_command.download_url.assert_called_with(
        download_path=create_command.dot_briefcase_path / 'support',
        url='https://briefcase-support.org/python?platform=tester&version=3.X',
    )

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_install_pinned_app_support_package(create_command, myapp, tmp_path, support_path):
    "A pinned support package can be downloaded and unpacked where it is needed"
    # Pin the support revision
    myapp.support_revision = '42'

    # Write a temporary support zip file
    support_file = tmp_path / 'out.zip'
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Install the support package
    create_command.install_app_support_package(myapp)

    # Confirm the right URL was used
    create_command.download_url.assert_called_with(
        download_path=create_command.dot_briefcase_path / 'support',
        url='https://briefcase-support.org/python?platform=tester&version=3.X&revision=42',
    )

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_install_custom_app_support_package_file(create_command, myapp, tmp_path, support_path):
    "A custom support package can be specified as a local file"
    # Provide an app-specific override of the package URL
    myapp.support_package = os.fsdecode(tmp_path / 'custom' / 'support.zip')

    # Write a temporary support zip file
    support_file = tmp_path / 'custom' / 'support.zip'
    support_file.parent.mkdir(parents=True)
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock()

    # Install the support package
    create_command.install_app_support_package(myapp)

    # There should have been no download attempt,
    # as the resource is local.
    create_command.download_url.assert_not_called()

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_install_custom_app_support_package_url(create_command, myapp, tmp_path, support_path):
    "A custom support package can be specified as URL"
    # Provide an app-specific override of the package URL
    myapp.support_package = 'https://example.com/custom/support.zip'

    # Write a temporary support zip file
    support_file = tmp_path / 'out.zip'
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Install the support package
    create_command.install_app_support_package(myapp)

    # Confirm the right URL was used
    create_command.download_url.assert_called_with(
        download_path=create_command.dot_briefcase_path / 'support',
        url='https://example.com/custom/support.zip',
    )

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_install_pinned_custom_app_support_package_url(create_command, myapp, tmp_path, support_path):
    "A custom support package can be specified as URL, and pinned to a revision"
    # Pin the support revision
    myapp.support_revision = '42'

    # Provide an app-specific override of the package URL
    myapp.support_package = 'https://example.com/custom/support.zip'

    # Write a temporary support zip file
    support_file = tmp_path / 'out.zip'
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Install the support package
    create_command.install_app_support_package(myapp)

    # Confirm the right URL was used
    create_command.download_url.assert_called_with(
        download_path=create_command.dot_briefcase_path / 'support',
        url='https://example.com/custom/support.zip?revision=42',
    )

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_install_pinned_custom_app_support_package_url_with_args(create_command, myapp, tmp_path, support_path):
    "A custom support package can be specified as URL with args, and pinned to a revision"
    # Pin the support revision
    myapp.support_revision = '42'

    # Provide an app-specific override of the package URL
    myapp.support_package = 'https://example.com/custom/support.zip?cool=Yes'

    # Write a temporary support zip file
    support_file = tmp_path / 'out.zip'
    with zipfile.ZipFile(support_file, 'w') as support_zip:
        support_zip.writestr('internal/file.txt', data='hello world')

    # Modify download_url to return the temp zipfile
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Install the support package
    create_command.install_app_support_package(myapp)

    # Confirm the right URL was used
    create_command.download_url.assert_called_with(
        download_path=create_command.dot_briefcase_path / 'support',
        url='https://example.com/custom/support.zip?cool=Yes&revision=42',
    )

    # Confirm that the full path to the support file
    # has been unpacked.
    assert (support_path / 'internal' / 'file.txt').exists()


def test_offline_install(create_command, myapp, support_path):
    "If the computer is offline, an error is raised"
    create_command.download_url = mock.MagicMock(
        side_effect=requests_exceptions.ConnectionError
    )

    # Installing while offline raises an error
    with pytest.raises(NetworkFailure):
        create_command.install_app_support_package(myapp)


def test_invalid_support_package(create_command, myapp, tmp_path, support_path):
    "If the support package isn't a valid zipfile, an error is raised"
    # Create a support package that isn't a zipfile
    support_file = tmp_path / 'out.zip'
    with open(support_file, 'w') as bad_support_zip:
        bad_support_zip.write("This isn't a zip file")

    # Make the download URL return the temp file
    create_command.download_url = mock.MagicMock(return_value=support_file)

    # Installing the bad support package raises an error
    with pytest.raises(InvalidSupportPackage):
        create_command.install_app_support_package(myapp)


def test_missing_support_package(create_command, myapp, tmp_path, support_path):
    "If the path provided for the support package is bad, an error is raised"
    # Set a custom support package that doesn't exist
    myapp.support_package = '/path/does/not/exist.zip'

    # Installing the bad support package raises an error
    with pytest.raises(InvalidSupportPackage):
...
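Each of the Briefcase tests above builds its support package the same way: write a throwaway zip with `zipfile.ZipFile.writestr`, then hand its path to the mocked `download_url`. The fixture-building and unpacking steps on their own, runnable without pytest or Briefcase (the directory layout mirrors the tests):

import tempfile
import zipfile
from pathlib import Path

tmp_path = Path(tempfile.mkdtemp())

# Build a minimal support package fixture
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
    support_zip.writestr('internal/file.txt', data='hello world')

# Unpack it the way the install step would
support_path = tmp_path / 'support'
with zipfile.ZipFile(support_file, 'r') as support_zip:
    support_zip.extractall(support_path)

assert (support_path / 'internal' / 'file.txt').exists()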

