How to use the include_partition method in autotest

Best Python code snippets using autotest_python
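Across the snippets collected on this page, include_partition is not a method call; it is a local boolean flag that each script raises when it starts examining a candidate partition (a grouping of cows, a set of experiment tags) and lowers as soon as any part of that partition fails a check, so that only acceptable partitions are kept. A minimal sketch of the idiom follows; the names filter_partitions and violates are made up for illustration and are not autotest or library APIs:

def filter_partitions(candidate_partitions, violates):
    """Keep only the partitions in which no group fails the `violates` check."""
    kept = []
    for partition in candidate_partitions:
        include_partition = True        # optimistically include...
        for group in partition:
            if violates(group):         # ...until any group fails the check
                include_partition = False
                break
        if include_partition:
            kept.append(partition)
    return kept

# Example: keep partitions whose groups each sum to at most 10.
print(filter_partitions(
    [[[4, 5], [2]], [[9, 9]]],
    lambda group: sum(group) > 10,
))  # -> [[[4, 5], [2]]]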

storage_layers_test.py

Source: storage_layers_test.py (GitHub)


#!/usr/bin/env python
# This software was developed at the National Institute of Standards
# and Technology in whole or in part by employees of the Federal
# Government in the course of their official duties. Pursuant to
# title 17 Section 105 of the United States Code portions of this
# software authored by NIST employees are not subject to copyright
# protection and are in the public domain. For portions not authored
# by NIST employees, NIST has been granted unlimited rights. NIST
# assumes no responsibility whatsoever for its use by other parties,
# and makes no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.

__version__ = "0.3.1"

import os
import sys
import hashlib
import logging
import typing

import dfxml.objects as Objects
import libtest

_logger = logging.getLogger(os.path.basename(__file__))

TEST_BYTE_STRING_1 = b"Test string 1"
TEST_BYTE_STRING_2 = b"Test string 2"
TEST_BYTE_STRING_3 = b"Test string 3"
TEST_BYTE_STRING_4 = b"Test string 4"
TEST_BYTE_STRING_5 = b"Test string 5"

tmphash = hashlib.sha512()
tmphash.update(TEST_BYTE_STRING_1)
TEST_HASH_1 = tmphash.hexdigest()
tmphash = hashlib.sha512()
tmphash.update(TEST_BYTE_STRING_2)
TEST_HASH_2 = tmphash.hexdigest()
tmphash = hashlib.sha512()
tmphash.update(TEST_BYTE_STRING_3)
TEST_HASH_3 = tmphash.hexdigest()
tmphash = hashlib.sha512()
tmphash.update(TEST_BYTE_STRING_4)
TEST_HASH_4 = tmphash.hexdigest()
tmphash = hashlib.sha512()
tmphash.update(TEST_BYTE_STRING_5)
TEST_HASH_5 = tmphash.hexdigest()


def _test_file_in_non_fs_levels_deep(
    include_disk_image: bool,
    include_partition_system: bool,
    include_partition: bool,
    include_file_system: bool
) -> None:
    """
    This test follows a simple, vertical storage layer stack, but adds a file at each layer.
    """
    dobj = Objects.DFXMLObject(version="1.2.0")
    # Add file to top-level document.
    fobj_dobj = Objects.FileObject()
    fobj_dobj.alloc_inode = False
    fobj_dobj.alloc_name = False
    fobj_dobj.sha512 = TEST_HASH_1
    dobj.append(fobj_dobj)
    appender_stack: typing.List[Objects.AbstractParentObject] = [dobj]
    if include_disk_image:
        # Add disk image to top-level document.
        diobj = Objects.DiskImageObject()
        appender_stack[-1].append(diobj)
        appender_stack.append(diobj)
        # Add file to disk image.
        fobj_diobj = Objects.FileObject()
        fobj_diobj.alloc_inode = False
        fobj_diobj.alloc_name = False
        fobj_diobj.sha512 = TEST_HASH_2
        diobj.append(fobj_diobj)
    if include_partition_system:
        # Add partition system to disk image.
        psobj = Objects.PartitionSystemObject()
        appender_stack[-1].append(psobj)
        appender_stack.append(psobj)
        # Add file to partition system.
        fobj_psobj = Objects.FileObject()
        fobj_psobj.alloc_inode = False
        fobj_psobj.alloc_name = False
        fobj_psobj.sha512 = TEST_HASH_3
        psobj.append(fobj_psobj)
    if include_partition:
        # Add partition to partition system, but not disk image.
        if not (include_disk_image and not include_partition_system):
            pobj = Objects.PartitionObject()
            appender_stack[-1].append(pobj)
            appender_stack.append(pobj)
            # Add file to partition.
            fobj_pobj = Objects.FileObject()
            fobj_pobj.alloc_inode = False
            fobj_pobj.alloc_name = False
            fobj_pobj.sha512 = TEST_HASH_4
            pobj.append(fobj_pobj)
    if include_file_system:
        # Add file system to anything but a partition system.
        if not (include_partition_system and not include_partition):
            vobj = Objects.VolumeObject()
            appender_stack[-1].append(vobj)
            appender_stack.append(vobj)
            # Add file to file system.
            fobj_vobj = Objects.FileObject()
            fobj_vobj.sha512 = TEST_HASH_5
            vobj.append(fobj_vobj)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        container_stack = [dobj_reconst]
        assert dobj_reconst.files[0].sha512 == TEST_HASH_1
        if include_disk_image:
            diobj_reconst = container_stack[-1].disk_images[0]
            container_stack.append(diobj_reconst)
            assert diobj_reconst.files[0].sha512 == TEST_HASH_2
        if include_partition_system:
            psobj_reconst = container_stack[-1].partition_systems[0]
            container_stack.append(psobj_reconst)
            assert psobj_reconst.files[0].sha512 == TEST_HASH_3
        if include_partition:
            if not (include_disk_image and not include_partition_system):
                pobj_reconst = container_stack[-1].partitions[0]
                container_stack.append(pobj_reconst)
                assert pobj_reconst.files[0].sha512 == TEST_HASH_4
        if include_file_system:
            if not (include_partition_system and not include_partition):
                vobj_reconst = container_stack[-1].volumes[0]
                assert vobj_reconst.files[0].sha512 == TEST_HASH_5
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
    os.remove(tmp_filename)


def test_file_in_non_fs_levels_deep() -> None:
    for include_disk_image in [True, False]:
        for include_partition_system in [True, False]:
            for include_partition in [True, False]:
                for include_file_system in [True, False]:
                    try:
                        _test_file_in_non_fs_levels_deep(
                            include_disk_image,
                            include_partition_system,
                            include_partition,
                            include_file_system
                        )
                    except:
                        _logger.debug("include_disk_image = %r." % include_disk_image)
                        _logger.debug("include_partition_system = %r." % include_partition_system)
                        _logger.debug("include_partition = %r." % include_partition)
                        _logger.debug("include_file_system = %r." % include_file_system)
                        raise


def _test_file_in_non_fs_levels_flat(
    include_disk_image: bool,
    include_partition_system: bool,
    include_partition: bool,
    include_file_system: bool
) -> None:
    """
    This test follows a simple, horizontal storage layer stack (every container attached to top document object), and adds a file for each container.
    """
    dobj = Objects.DFXMLObject(version="1.2.0")
    # Add file to top-level document.
    fobj_dobj = Objects.FileObject()
    fobj_dobj.alloc_inode = False
    fobj_dobj.alloc_name = False
    fobj_dobj.sha512 = TEST_HASH_1
    dobj.append(fobj_dobj)
    if include_disk_image:
        # Add disk image.
        diobj = Objects.DiskImageObject()
        dobj.append(diobj)
        # Add file to disk image.
        fobj_diobj = Objects.FileObject()
        fobj_diobj.alloc_inode = False
        fobj_diobj.alloc_name = False
        fobj_diobj.sha512 = TEST_HASH_2
        diobj.append(fobj_diobj)
    if include_partition_system:
        # Add partition system.
        psobj = Objects.PartitionSystemObject()
        dobj.append(psobj)
        # Add file to partition system.
        fobj_psobj = Objects.FileObject()
        fobj_psobj.alloc_inode = False
        fobj_psobj.alloc_name = False
        fobj_psobj.sha512 = TEST_HASH_3
        psobj.append(fobj_psobj)
    if include_partition:
        # Add partition.
        pobj = Objects.PartitionObject()
        dobj.append(pobj)
        # Add file to partition.
        fobj_pobj = Objects.FileObject()
        fobj_pobj.alloc_inode = False
        fobj_pobj.alloc_name = False
        fobj_pobj.sha512 = TEST_HASH_4
        pobj.append(fobj_pobj)
    if include_file_system:
        # Add file system.
        vobj = Objects.VolumeObject()
        dobj.append(vobj)
        # Add file to file system.
        fobj_vobj = Objects.FileObject()
        fobj_vobj.sha512 = TEST_HASH_5
        vobj.append(fobj_vobj)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        assert dobj_reconst.files[0].sha512 == TEST_HASH_1
        if include_disk_image:
            diobj_reconst = dobj_reconst.disk_images[0]
            assert diobj_reconst.files[0].sha512 == TEST_HASH_2
        if include_partition_system:
            psobj_reconst = dobj_reconst.partition_systems[0]
            assert psobj_reconst.files[0].sha512 == TEST_HASH_3
        if include_partition:
            pobj_reconst = dobj_reconst.partitions[0]
            assert pobj_reconst.files[0].sha512 == TEST_HASH_4
        if include_file_system:
            vobj_reconst = dobj_reconst.volumes[0]
            assert vobj_reconst.files[0].sha512 == TEST_HASH_5
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
    os.remove(tmp_filename)


def test_file_in_non_fs_levels_flat() -> None:
    for include_disk_image in [True, False]:
        for include_partition_system in [True, False]:
            for include_partition in [True, False]:
                for include_file_system in [True, False]:
                    try:
                        _test_file_in_non_fs_levels_flat(
                            include_disk_image,
                            include_partition_system,
                            include_partition,
                            include_file_system
                        )
                    except:
                        _logger.debug("include_disk_image = %r." % include_disk_image)
                        _logger.debug("include_partition_system = %r." % include_partition_system)
                        _logger.debug("include_partition = %r." % include_partition)
                        _logger.debug("include_file_system = %r." % include_file_system)
                        raise


def test_solaris_ps_in_partition() -> None:
    dobj = Objects.DFXMLObject(version="1.2.0")
    psobj_outer = Objects.PartitionSystemObject()
    dobj.append(psobj_outer)
    # Add file to outer partition system.
    fobj_psobj_outer = Objects.FileObject()
    fobj_psobj_outer.alloc_inode = False
    fobj_psobj_outer.alloc_name = False
    fobj_psobj_outer.sha512 = TEST_HASH_1
    psobj_outer.append(fobj_psobj_outer)
    pobj = Objects.PartitionObject()
    psobj_outer.append(pobj)
    # Add file to partition.
    fobj_pobj = Objects.FileObject()
    fobj_pobj.alloc_inode = False
    fobj_pobj.alloc_name = False
    fobj_pobj.sha512 = TEST_HASH_2
    pobj.append(fobj_pobj)
    psobj_inner = Objects.PartitionSystemObject()
    pobj.append(psobj_inner)
    # Add file to inner partition system.
    fobj_psobj_inner = Objects.FileObject()
    fobj_psobj_inner.alloc_inode = False
    fobj_psobj_inner.alloc_name = False
    fobj_psobj_inner.sha512 = TEST_HASH_3
    psobj_inner.append(fobj_psobj_inner)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        psobj_outer_reconst = dobj_reconst.partition_systems[0]
        pobj_reconst = psobj_outer_reconst.partitions[0]
        psobj_inner_reconst = pobj_reconst.partition_systems[0]
        assert psobj_outer_reconst.files[0].sha512 == TEST_HASH_1
        assert pobj_reconst.files[0].sha512 == TEST_HASH_2
        assert psobj_inner_reconst.files[0].sha512 == TEST_HASH_3
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
    os.remove(tmp_filename)


def test_partition_in_partition() -> None:
    #TODO Remove "+" on DFXML Schema 1.3.0 tracking.
    dobj = Objects.DFXMLObject(version="1.2.0+")
    psobj = Objects.PartitionSystemObject()
    psobj.pstype_str = "mbr"
    dobj.append(psobj)
    pobj_outer = Objects.PartitionObject()
    psobj.append(pobj_outer)
    pobj_inner = Objects.PartitionObject()
    pobj_outer.append(pobj_inner)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        psobj_reconst = dobj_reconst.partition_systems[0]
        pobj_outer_reconst = psobj_reconst.partitions[0]
        pobj_inner_reconst = pobj_outer_reconst.partitions[0]
        assert isinstance(pobj_inner_reconst, Objects.PartitionObject)
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
    os.remove(tmp_filename)


def test_hfsplus_in_hfs() -> None:
    dobj = Objects.DFXMLObject(version="1.2.0")
    vobj_outer = Objects.VolumeObject()
    vobj_outer.ftype_str = "hfs"
    dobj.append(vobj_outer)
    vobj_inner = Objects.VolumeObject()
    vobj_inner.ftype_str = "hfsplus"
    vobj_outer.append(vobj_inner)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        vobj_outer_reconst = dobj_reconst.volumes[0]
        vobj_inner_reconst = vobj_outer_reconst.volumes[0]
        assert isinstance(vobj_inner_reconst, Objects.VolumeObject)
        assert vobj_outer_reconst.ftype_str == "hfs"
        assert vobj_inner_reconst.ftype_str == "hfsplus"
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
    os.remove(tmp_filename)


def test_disk_image_in_file_system() -> None:
    dobj = Objects.DFXMLObject(version="1.2.0")
    vobj = Objects.VolumeObject()
    vobj.ftype_str = "iso9660"
    dobj.append(vobj)
    fobj_vobj = Objects.FileObject()
    fobj_vobj.sha512 = TEST_HASH_1
    vobj.append(fobj_vobj)
    diobj = Objects.DiskImageObject()
    vobj.append(diobj)
    fobj_diobj = Objects.FileObject()
    fobj_diobj.alloc_inode = False
    fobj_diobj.alloc_name = False
    fobj_diobj.sha512 = TEST_HASH_2
    diobj.append(fobj_diobj)
    # Do file I/O round trip.
    (tmp_filename, dobj_reconst) = libtest.file_round_trip_dfxmlobject(dobj)
    try:
        vobj_reconst = dobj_reconst.volumes[0]
        diobj_reconst = vobj_reconst.disk_images[0]
        assert vobj_reconst.files[0].sha512 == TEST_HASH_1
        assert diobj_reconst.files[0].sha512 == TEST_HASH_2
    except:
        _logger.debug("tmp_filename = %r." % tmp_filename)
        raise
...
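In the snippet above, the test_file_in_non_fs_levels_* drivers sweep all 16 combinations of their four include_* flags with four nested loops. The same sweep can be written more compactly with itertools.product; this is a sketch of an equivalent driver, not part of the original file, with the per-flag failure logging omitted for brevity:

import itertools

def test_file_in_non_fs_levels_deep() -> None:
    # Exercise all 16 combinations of the four include_* booleans.
    for combo in itertools.product([True, False], repeat=4):
        _test_file_in_non_fs_levels_deep(*combo)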


ps1.py

Source: ps1.py (GitHub)


###########################
# 6.00.2x Problem Set 1: Space Cows

from ps1_partition import get_partitions
import time
import datetime

#================================
# Part A: Transporting Space Cows
#================================

def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.
    Parameters:
    filename - the name of the data file as a string
    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cow_dict = dict()
    f = open(filename, 'r')

    for line in f:
        line_data = line.split(',')
        cow_dict[line_data[0]] = int(line_data[1])
    return cow_dict

# Problem 1
def greedy_cow_transport(cows, limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows. The
    returned allocation of cows may or may not be optimal.
    The greedy heuristic should follow the following method:
    1. As long as the current trip can fit another cow, add the largest cow that
       will fit to the trip
    2. Once the trip is full, begin a new trip to transport the remaining cows
    Does not mutate the given dictionary of cows.
    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    start = time.perf_counter_ns()
    trips = []
    # count = 0
    moved_cows = []
    sorted_cows = sorted(cows.items(), key=lambda kv: kv[1], reverse=True)
    # print(sorted_cows)
    while len(moved_cows) < len(cows):
        current_weight = 0
        cargo = []
        for cow_name, weight in sorted_cows:
            if cow_name not in moved_cows:
                # count += 1
                if current_weight + weight <= limit:
                    # del sorted_cows[count - 1]
                    current_weight += weight
                    cargo.append(cow_name)
                    moved_cows.append(cow_name)
        trips.append(cargo)
    end = time.perf_counter_ns() - start
    print(end / 1000000)  # convert nanoseconds to milliseconds
    return trips

# Problem 2
def brute_force_cow_transport(cows, limit=10):
    """
    Finds the allocation of cows that minimizes the number of spaceship trips
    via brute force. The brute force algorithm should follow the following method:
    1. Enumerate all possible ways that the cows can be divided into separate trips
    2. Select the allocation that minimizes the number of trips without making any
       trip that does not obey the weight limitation

    Does not mutate the given dictionary of cows.
    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    trips = []
    count = 0
    start = time.perf_counter_ns()
    # start = datetime.datetime.now()
    # start = time.time()
    for partition in get_partitions(sorted(cows.items(), key=lambda kv: kv[1], reverse=True)):
        # print("partition created", partition)
        count += 1
        include_partition = True
        names = []  # Reset per partition so rejected partitions leave no stale names behind.
        for cow_name_array in partition:
            if sum([c[1] for c in cow_name_array]) > limit:
                include_partition = False
                break
            else:
                names.append([c[0] for c in cow_name_array])
        if include_partition:
            print("Valid partition: ", partition)
            trips = names
            names = []
            # Break after the first valid partition is found, which contains the
            # least amount of trips. (Note: get_partitions does not guarantee a
            # fewest-trips-first ordering; the variants below sort by length.)
            break
    end = time.perf_counter_ns() - start
    # end = datetime.datetime.now() - start
    # end = time.time()
    # print(end - start)
    print(end)
    print(end / 1000000)  # convert nanoseconds to milliseconds
    print("brute force1", count)
    return trips

def brute_force_cow_transport_2(cows, limit=10):
    trip = []
    count = 0
    # start = datetime.datetime.now()
    # start = time.time()
    start = time.perf_counter_ns()
    # for partition in sorted(get_partitions(sorted(cows.items(), key=lambda kv: kv[1], reverse=True)), reverse=True):
    # for partition in get_partitions(sorted(cows.items(), key=lambda kv: kv[1], reverse=True)):
    for partition in sorted(get_partitions(cows.items()), key=lambda x: len(x)):
        # print("partition created", partition)
        count += 1
        include_partition = True
        for cow_name_array in partition:
            total_weight = 0
            names = []
            for c in cow_name_array:
                name, weight = c
                total_weight += weight
                if total_weight > limit:
                    include_partition = False
                    break
                else:
                    names.append(name)
            if not include_partition:
                break
            else:
                trip.append(names)
        # if sum([c[1] for c in cow_name_array]) > limit:
        #     include_partition = False
        # else:
        #     names = [c[0] for c in cow_name_array]
        if include_partition:
            print("Valid partition: ", partition)
            # trip.append(names)
            # Break after the first valid partition is found, which contains the
            # least amount of trips.
            break
    end = time.perf_counter_ns() - start
    # end = datetime.datetime.now() - start
    # end = time.time()
    # print(end - start)
    print(end)
    print(end / 1000000)  # convert nanoseconds to milliseconds
    print("brute force 2", count)
    return trip

def brute_force_cow_transport_clean(cows, limit=10):
    start = time.perf_counter_ns()
    trip = []
    for partition in sorted(get_partitions(cows.items()), key=lambda x: len(x)):
        include_partition = True
        trip = []
        for cow_name_array in partition:
            total_weight = 0
            names = []
            for c in cow_name_array:
                name, weight = c
                total_weight += weight
                if total_weight > limit:
                    include_partition = False
                    break
                else:
                    names.append(name)
            if not include_partition:
                break
            else:
                trip.append(names)
        if include_partition:
            # all_options.append(trip)
            # Break after the first valid partition is found, which contains the
            # least amount of trips.
            break
    end = time.perf_counter_ns() - start
    print(end / 1000000)  # convert nanoseconds to milliseconds
    return trip

# Problem 3
def compare_cow_transport_algorithms():
    """
    Using the data from ps1_cow_data.txt and the specified weight limit, run your
    greedy_cow_transport and brute_force_cow_transport functions here. Use the
    default weight limits of 10 for both greedy_cow_transport and
    brute_force_cow_transport.

    Print out the number of trips returned by each method, and how long each
    method takes to run in seconds.
    Returns:
    Does not return anything.
    """
    # TODO: Your code here
    pass

"""
Here is some test data for you to see the results of your algorithms with.
Do not submit this along with any of your answers. Uncomment the last two
lines to print the result of your problem.
"""
cows = load_cows("ps1_cow_data.txt")
limit = 10
print(cows)
print(greedy_cow_transport(cows, limit))
# print(brute_force_cow_transport(cows, limit))
# print(brute_force_cow_transport_2(cows, limit))
print(brute_force_cow_transport_clean(cows, limit))
# cows = {'Lotus': 40, 'Horns': 25, 'Boo': 20, 'Milkshake': 40, 'MooMoo': 50, 'Miss Bella': 25}
# print(brute_force_cow_transport_2(cows, 100))
# print(brute_force_cow_transport_clean(cows, 100))
'''
cargo = []
part_cargo_count = 0
for cow_name_array in partition:
    # print(cow_name_array)
    current_weight = 0
    for cow_name in cow_name_array:
        current_weight += cows[cow_name]
        # current_weight = [c[1] for c in cows if c[0] == cow_name]
        # cargo_weight += current_weight
        cargo.append(cow_name)
        if current_weight > limit:
            cargo = []
            break
    if len(cargo) == 0:
        break
    part_cargo_count += 1
if part_cargo_count == len(partition):
    trips.append(partition)

    # print(count) # 115975
    # print(cows)
    start = time.time()
    for partition in get_partitions(sorted(cows.items(), key=lambda kv: kv[1], reverse=True)):
        # print(partition)
        count += 1
    end = time.time()
    print(end - start) # 0.5056478977203369
    print(count) # 115975
    print(cows)

    if sum([c[1] for c in cow_name_array]) <= limit:
        trips.append([c[0] for c in cow_name_array])
    else:
...
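Both brute-force variants above depend on get_partitions from ps1_partition, the 6.00.2x helper that enumerates every way to split a collection into non-empty groups; that module is not shown on this page. A self-contained generator with the same observable behavior might look like the sketch below (an assumption, not the course's exact code). Note the snippets call it on cows.items(), which this list-indexing version would need wrapped in list():

def partitions(items):
    """Yield every way to split the list `items` into non-empty groups."""
    if not items:
        yield []
        return
    first, rest = items[0], items[1:]
    for smaller in partitions(rest):
        # Place `first` into each existing group in turn...
        for i, group in enumerate(smaller):
            yield smaller[:i] + [[first] + group] + smaller[i + 1:]
        # ...or give `first` a group of its own.
        yield [[first]] + smaller

# Example: prints the 5 partitions (Bell number B(3)) of three cow names.
for p in partitions(["Boo", "Horns", "Milkshake"]):
    print(p)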


all_untampered_bulk_primary_cells_with_data.py

Source: all_untampered_bulk_primary_cells_with_data.py (GitHub)


####################################################################################
# This creates a list of experiments corresponding to untampered bulk experiments.
# By 'untampered' I mean all experiments that have not been treated in such a way
# that a purposeful change in gene expression was induced. This does not include
# experiments that have been transfected with a control vector.
####################################################################################

from optparse import OptionParser
import json
import graph_lib
from graph_lib import graph
import kallisto_quantified_data_manager_hdf5_py3 as kqdm

EXCLUDE_TAGS = set([
    "experimental_treatment",
    "in_vitro_differentiated_cells",
    "stimulation",
    "infected",
    "diseased_cells",
    "alternate_assay",
    "cell_line",
    "single_cell",
    "tissue"
])

EXCEPT_EXCLUDE_TAGS = set([
    "transduced_control",
    "transfected_control",
    "sirna_treatment_control",
    "shrna_treatment_control"
])

def main():
    usage = ""  # TODO
    parser = OptionParser(usage=usage)
    parser.add_option("-o", "--out_file", help="Output file")
    (options, args) = parser.parse_args()
    annotation_f = args[0]
    out_f = options.out_file
    with open(annotation_f, 'r') as f:
        annotation = json.load(f)
    tags_f = "/ua/mnbernstein/projects/tbcp/phenotyping/manage_data/tags/tags.json"
    tags_graph = import_tags_graph(tags_f)
    exclude_tags = set(EXCLUDE_TAGS)
    for ex_tag in EXCLUDE_TAGS:
        exclude_tags.update(tags_graph.ancestor_nodes(ex_tag))
    include_experiments = set()
    for annot_data in annotation['annotated_studies']:
        for partition in annot_data['partitions']:
            include_partition = True
            for tag in partition['tags']:
                if tag not in EXCEPT_EXCLUDE_TAGS and tag in exclude_tags:
                    include_partition = False
                    break
            if include_partition:
                include_experiments.update(partition['experiments'])
    exps_w_data = set(kqdm.filter_for_experiments_in_db(include_experiments))
    include_experiments = set(include_experiments) & exps_w_data
    # Remove all experiments that have a NaN count value
    exps_list, data_matrix = kqdm.get_transcript_counts_for_experiments(
        include_experiments
    )
    found_nan_exps = set()
    for vec_i, vec in enumerate(data_matrix):
        if (vec_i + 1) % 100 == 0:
            print("Checked {}/{} vectors...".format(vec_i + 1, len(data_matrix)))
        sum_vec = sum(vec)
        if sum_vec == 0.0:
            print("Experiment {} has a sum of zero...".format(exps_list[vec_i]))
            found_nan_exps.add(exps_list[vec_i])
    include_experiments = include_experiments - found_nan_exps

    with open(out_f, 'w') as f:
        f.write(json.dumps(
            {
                "list_name": "all_untampered_bulk_primary_cells_with_data",
                "description": "These are all experiments that are not labelled with {}, but allows those labelled with {}.".format(
                    list(EXCLUDE_TAGS),
                    list(EXCEPT_EXCLUDE_TAGS)
                ),
                "experiments": list(include_experiments)
            },
            indent=4
        ))

def import_tags_graph(tags_f):
    with open(tags_f, 'r') as f:
        tags_data = json.load(f)
    tags_nodes = tags_data['definitions']
    tags_source_to_targets = {
        x: []
        for x in tags_nodes
    }
    tags_source_to_targets.update(tags_data['implications'])
    tags_graph = graph.DirectedAcyclicGraph(tags_source_to_targets)
    return tags_graph


if __name__ == "__main__":
...
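In this script, include_partition keeps a partition of experiments only when none of its tags is excluded, unless that tag is explicitly whitelisted. The same decision isolated with plain sets and made-up sample tags (the tag names below are illustrative, not the project's full lists):

EXCLUDE_TAGS = {"experimental_treatment", "cell_line", "single_cell"}
EXCEPT_EXCLUDE_TAGS = {"transfected_control"}

def keep_partition(tags):
    # A partition is kept only if every excluded tag it carries
    # is explicitly whitelisted.
    for tag in tags:
        if tag not in EXCEPT_EXCLUDE_TAGS and tag in EXCLUDE_TAGS:
            return False
    return True

print(keep_partition(["transfected_control"]))        # True (whitelisted)
print(keep_partition(["cell_line", "bulk_rna_seq"]))  # False (excluded tag)
print(keep_partition(["bulk_rna_seq"]))               # True (no excluded tag)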


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing free.

