How to use the unlock_targets method in yandex-tank

Best Python code snippets using yandex-tank
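In yandex-tank, unlock_targets lives in the Lunapark data-uploader plugin: check_and_lock_targets reserves the load-test target hosts through the Lunapark API when a test starts, and unlock_targets releases every host recorded in self.locked_targets when the test ends. Before the full plugin source, here is a minimal sketch of that lifecycle; APIClientStub and PluginSketch are hypothetical stand-ins for illustration, not part of yandex-tank, which talks to Lunapark through self.lp_job and its api_client.

# Minimal sketch of the lock/unlock lifecycle, assuming a Lunapark-like API.
# APIClientStub and PluginSketch are hypothetical stand-ins for illustration.
class APIClientStub:
    def lock_target(self, host, duration, ignore, strict):
        print("locking %s for %s seconds" % (host, duration))
        return True  # pretend the lock was granted

    def unlock_target(self, host):
        print("unlocking %s" % host)


class PluginSketch:
    def __init__(self):
        self.api_client = APIClientStub()
        self.lock_duration = 300          # seconds; arbitrary for the sketch
        self.locked_targets = []

    def check_and_lock_targets(self, targets):
        # remember only the hosts whose lock was actually granted,
        # mirroring the list comprehension in lock_targets() below
        self.locked_targets = [
            t for t in targets
            if self.api_client.lock_target(t, self.lock_duration,
                                           ignore=False, strict=True)
        ]

    def unlock_targets(self):
        # release every host we locked; the real plugin calls this from end_test()
        for target in self.locked_targets:
            self.api_client.unlock_target(target)


plugin = PluginSketch()
plugin.check_and_lock_targets(["tank-target-01"])
plugin.unlock_targets()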

plugin.py

Source: plugin.py (GitHub)

...
    def end_test(self, retcode):
        if retcode != 0:
            self.lp_job.interrupted.set()
        self.__save_conf()
        self.unlock_targets()
        return retcode

    def close_job(self):
        self.lp_job.close(self.retcode)

    def join_threads(self):
        self.lp_job.interrupted.set()
        if self.monitoring.is_alive():
            self.monitoring.join()
        if self.upload.is_alive():
            self.upload.join()

    def stop_events_processing(self):
        self.events_queue.put(None)
        self.events_reader.close()
        self.events_processing.close()
        if self.events_processing.is_alive():
            self.events_processing.join()
        if self.events.is_alive():
            self.lp_job.interrupted.set()
            self.events.join()

    def post_process(self, rc):
        self.retcode = rc
        self.monitoring_queue.put(None)
        self.data_queue.put(None)
        if self.core.error_log:
            self.events_queue.put(None)
            self.events_reader.close()
            self.events_processing.close()
            self.events.join()
        logger.info("Waiting for sender threads to join.")
        if self.monitoring.is_alive():
            self.monitoring.join()
        if self.upload.is_alive():
            self.upload.join()
        self.finished = True
        logger.info("Web link: %s", self.web_link)
        autostop = None
        try:
            autostop = self.core.get_plugin_of_type(AutostopPlugin)
        except KeyError:
            logger.debug("No autostop plugin loaded", exc_info=True)
        if autostop and autostop.cause_criterion:
            self.lp_job.set_imbalance_and_dsc(
                autostop.imbalance_rps, autostop.cause_criterion.explain())
        else:
            logger.debug("No autostop cause detected")
        self.__save_conf()
        return rc

    def on_aggregated_data(self, data, stats):
        """
        @data: aggregated data
        @stats: stats about gun
        """
        if not self.lp_job.interrupted.is_set():
            self.data_queue.put((data, stats))

    def monitoring_data(self, data_list):
        if not self.lp_job.interrupted.is_set():
            if len(data_list) > 0:
                [self.monitoring_queue.put(chunk)
                 for chunk in chop(data_list, self.get_option("chunk_size"))]

    def __send_status(self):
        logger.info('Status sender thread started')
        lp_job = self.lp_job
        while not lp_job.interrupted.is_set():
            try:
                self.lp_job.send_status(self.core.info.get_info_dict())
                time.sleep(self.get_option('send_status_period'))
            except (APIClient.NetworkError, APIClient.NotAvailable) as e:
                logger.warn('Failed to send status')
                logger.debug(e)
                break
            except APIClient.StoppedFromOnline:
                logger.info("Test stopped from Lunapark")
                self.retcode = self.RC_STOP_FROM_WEB
                break
            if self.finished:
                break
        logger.info("Closed Status sender thread")

    def __uploader(self, queue, sender_method, name='Uploader'):
        logger.info('{} thread started'.format(name))
        while not self.lp_job.interrupted.is_set():
            try:
                entry = queue.get(timeout=1)
                if entry is None:
                    logger.info("{} queue returned None".format(name))
                    break
                sender_method(entry)
            except Empty:
                continue
            except APIClient.StoppedFromOnline:
                logger.warning("Lunapark is rejecting {} data".format(name))
                break
            except (APIClient.NetworkError, APIClient.NotAvailable, APIClient.UnderMaintenance) as e:
                logger.warn('Failed to push {} data'.format(name))
                logger.warn(e)
                self.lp_job.interrupted.set()
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logger.error("Mysterious exception:\n%s\n%s\n%s", exc_type, exc_value, exc_traceback)
                break
        # purge queue
        while not queue.empty():
            if queue.get_nowait() is None:
                break
        logger.info("Closing {} thread".format(name))

    def __data_uploader(self):
        self.__uploader(self.data_queue,
                        lambda entry: self.lp_job.push_test_data(*entry),
                        'Data Uploader')

    def __monitoring_uploader(self):
        self.__uploader(self.monitoring_queue,
                        self.lp_job.push_monitoring_data,
                        'Monitoring Uploader')

    def __events_uploader(self):
        self.__uploader(self.events_queue,
                        self.lp_job.push_events_data,
                        'Events Uploader')

    # TODO: why we do it here? should be in core
    def __save_conf(self):
        for requisites, content in self.core.artifacts_to_send:
            self.lp_job.send_config(requisites, content)

    def parse_lock_targets(self):
        # prepare target lock list
        locks_list_cfg = self.get_option('lock_targets', 'auto')

        def no_target():
            logging.warn("Target lock set to 'auto', but no target info available")
            return {}

        locks_set = {self.target} or no_target() if locks_list_cfg == 'auto' else set(locks_list_cfg)
        targets_to_lock = [host for host in locks_set if host]
        return targets_to_lock

    def lock_targets(self, targets_to_lock, ignore, strict):
        locked_targets = [target for target in targets_to_lock
                          if self.lp_job.lock_target(target, self.lock_duration, ignore, strict)]
        return locked_targets

    def unlock_targets(self):
        logger.info("Unlocking targets: %s", self.locked_targets)
        for target in self.locked_targets:
            logger.info(target)
            self.lp_job.api_client.unlock_target(target)

    def check_and_lock_targets(self, strict, ignore):
        targets_list = self.parse_lock_targets()
        logger.info('Locking targets: %s', targets_list)
        locked_targets = self.lock_targets(targets_list, ignore=ignore, strict=strict)
        logger.info('Locked targets: %s', locked_targets)
        return locked_targets

    def make_symlink(self, name):
        PLUGIN_DIR = os.path.join(self.core.artifacts_base_dir, 'lunapark')
        if not os.path.exists(PLUGIN_DIR):
            os.makedirs(PLUGIN_DIR)
...
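A note on parse_lock_targets above: the lock_targets option defaults to 'auto', meaning "lock the host the tank is currently shooting at"; any other value is treated as an explicit host list. A standalone sketch of that resolution logic follows; resolve_lock_targets is a hypothetical helper for illustration (the real method reads the option through self.get_option), and it also handles the missing-target case explicitly.

import logging

def resolve_lock_targets(locks_list_cfg, current_target):
    # hypothetical helper mirroring parse_lock_targets():
    # 'auto' -> lock the current target; anything else -> explicit host list
    if locks_list_cfg == 'auto':
        if not current_target:
            logging.warning("Target lock set to 'auto', but no target info available")
            return []
        locks_set = {current_target}
    else:
        locks_set = set(locks_list_cfg)
    # drop empty entries, as the plugin's own list comprehension does
    return [host for host in locks_set if host]

print(resolve_lock_targets('auto', 'tank-target-01'))        # ['tank-target-01']
print(resolve_lock_targets(['host-a', '', 'host-b'], None))  # ['host-a', 'host-b'] (set order)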


svs_lost_during_alignment.py

Source: svs_lost_during_alignment.py (GitHub)

from MA import *
from MSV import *
import random
from sv_util.os_aligners import *
from bokeh.plotting import figure, show
from sv_util.bokeh_style_helper import *
from sv_util.settings import *
from bokeh.plotting import ColumnDataSource
from bokeh.layouts import column, row, grid
from bokeh.models.tools import HoverTool

def choice_adj_size(l, total_len):
    x = random.randrange(total_len)
    idx = 0
    while x >= l[idx][0]:
        x -= l[idx][0]
        idx += 1
    return l[idx]

def create_reads(pack, size, amount, func_get_seeds_and_read):
    lumper = SeedLumping(ParameterSetManager())
    read_by_name = ReadByName()
    genome_section_by_name = ReadByName()
    points_by_name = {}
    contigs = [(x, y) for x, y in zip(pack.contigLengths(), pack.contigStarts()) if x > size]
    total_len = sum(x for x, _ in contigs)

    def read_and_seeds():
        contig_len, contig_start = choice_adj_size(contigs, total_len)
        start = random.randrange(contig_len - size)
        genome_section = pack.extract_from_to(start+contig_start, start+size+contig_start)
        return func_get_seeds_and_read(genome_section, start + contig_start), genome_section, start + contig_start

    for idx in range(amount):
        read = NucSeq("N")
        while 'n' in str(read) or 'N' in str(read):
            (points, read), genome_section, sec_offset = read_and_seeds()
        read.name = "read" + str(idx)
        genome_section.name = "read" + str(idx)
        read_by_name.append(read)
        genome_section_by_name.append(genome_section)
        points_by_name[read.name] = (points, sec_offset)
    return points_by_name, read_by_name, genome_section_by_name

def create_scattered_read(pack, amount, num_pieces, size_pieces):
    lumper = SeedLumping(ParameterSetManager())
    read_by_name = ReadByName()
    genome_section_by_name = ReadByName()
    points_by_name = {}
    contigs = [(x, y) for x, y in zip(pack.contigLengths(), pack.contigStarts()) if x > size_pieces]
    total_len = sum(x for x, _ in contigs)

    def read_and_points():
        points = []
        read = ""
        for idx in range(0, num_pieces):
            contig_size, contig_start = choice_adj_size(contigs, total_len)
            start = random.randrange(contig_size - size_pieces)
            points.append((len(read), contig_start + start, True))
            points.append((len(read)+size_pieces, contig_start + start + size_pieces, True))
            read += str(pack.extract_from_to(contig_start+start, contig_start+start+size_pieces))
        return points, NucSeq(read)

    for idx in range(amount):
        read = NucSeq("N")
        while 'n' in str(read) or 'N' in str(read):
            points, read = read_and_points()
        read.name = "read" + str(idx)
        read_by_name.append(read)
        genome_section = NucSeq()
        genome_section.name = "read" + str(idx)
        genome_section_by_name.append(genome_section)
        points_by_name[read.name] = (points, 0)
    return points_by_name, read_by_name, genome_section_by_name

def compare(params, data, points_by_name, reads, pack_pledge, fm_index_pledge,
            unlock_targets=None, render_one=True, original_seeds_list=None, ignore_genome_offset=False):
    compare_module = CompareSeedSets(params)
    lumper = SeedLumping(params)
    get_rectangles = SvJumpsFromSeeds(params, pack_pledge.get())
    collector = NucSeqSeedCollector(params)
    if render_one:
        for idx in range(params.get_num_threads()):
            read = reads[idx].get()
            while read is not None:
                #lumped_g_t = lumper.execute(ground_truth[0].get(), read, pack_pledge.get())
                lumped_data = lumper.execute(data[idx].get(), read, pack_pledge.get())
                #if original_seeds_list is not None:
                #    printer = SeedPrinter(params)
                #    rectangles = get_rectangles.cpp_module.execute_helper(original_seeds_list[idx].get(),
                #                                                          pack_pledge.get(), read)
                #    printer.execute(lumped_data, rectangles)
                #else:
                if True:
                    printer = SeedPointPrinter(params)
                    printer.execute(lumped_data, points_by_name[read.name])
                exit()
                UnLock(params, unlock_targets[idx]).execute(Container())
                read = reads[idx].get()
    else:
        res = VectorPledge()
        for idx in range(params.get_num_threads()):
            #lumped_g_t = promise_me(lumper, ground_truth[idx], reads[idx], pack_pledge)
            lumped_data = promise_me(lumper, data[idx], reads[idx], pack_pledge)
            empty = promise_me(collector, reads[idx], lumped_data)
            if unlock_targets is None:
                unlock = empty
            else:
                unlock = promise_me(UnLock(params, unlock_targets[idx]), empty)
            res.append(unlock)
        res.simultaneous_get(params.get_num_threads())

    def matches(seed, point, sec_offset):
        q, r, f = point
        if ignore_genome_offset:
            r -= sec_offset

        def nearby_start(max_diff=5):
            return abs(q-seed.start) <= max_diff and abs(r-seed.start_ref) <= max_diff

        def nearby_end(max_diff=5):
            if seed.on_forward_strand:
                return abs(q-(seed.start+seed.size)) <= max_diff and abs(r-(seed.start_ref+seed.size)) <= max_diff
            else:
                return abs(q-(seed.start+seed.size)) <= max_diff and abs(r-(seed.start_ref-seed.size)) <= max_diff

        return seed.on_forward_strand == f and (nearby_start() or nearby_end())

    all_found = {}
    for name, point_values in points_by_name.items():
        all_found[name] = [False]*len(point_values[0])
    for read, seeds in collector.cpp_module.collection:
        for seed in seeds:
            sec_offset = points_by_name[read.name][1]
            for idx, point in enumerate(points_by_name[read.name][0]):
                if matches(seed, point, sec_offset):
                    all_found[read.name][idx] = True
    s = 0
    for point_values in all_found.values():
        all_true = True
        for found in point_values:
            if not found:
                all_true = False
        if all_true:
            s += 1
    #print("hits:", s)
    return s / len(points_by_name)

def compare_seeds(params, reads_by_name, points_by_name, fm_index, pack, mems=True, reseeding=True,
                  render_one=False):
    #params.by_name("Number of Threads").set(1)
    #params.by_name("Use all Processor Cores").set(False)
    splitter = NucSeqSplitter(params)
    lock = Lock(params)
    reads_by_name_pledge = Pledge()
    reads_by_name_pledge.set(reads_by_name)
    pack_pledge = Pledge()
    pack_pledge.set(pack)
    fm_index_pledge = Pledge()
    fm_index_pledge.set(fm_index)
    min_len = MinLength(params, params.by_name("Minimal Seed Size SV").get() + 1)
    if mems:
        seeding_module = MinimizerSeeding(params)
        seed_lumping = SeedLumping(params)
    else:
        seeding_module = BinarySeeding(params)
        extract_seeds = ExtractSeeds(params)
    reads_vec = ContainerVectorNucSeq()
    for name, read in reads_by_name:
        reads_vec.append(read)
    reads_vec_pledge = Pledge()
    reads_vec_pledge.set(reads_vec)
    data = []
    reads = []
    unlock_targets = []
    original_seeds_list = []
    read = promise_me(splitter, reads_vec_pledge)
    for _ in range(params.get_num_threads()):
        locked_read = promise_me(lock, read)
        unlock_targets.append(locked_read)
        reads.append(locked_read)
        if mems:
            minimizers = promise_me(seeding_module, fm_index_pledge, locked_read, pack_pledge)
            seeds = promise_me(seed_lumping, minimizers, locked_read, pack_pledge)
            if reseeding:
                soc_module = StripOfConsiderationSeeds(params)
                soc_filter = GetAllFeasibleSoCsAsSet(params)
                recursive_reseeding = RecursiveReseedingSoCs(params, pack)
                socs = promise_me(soc_module, seeds, locked_read, pack_pledge)
                soc_filtered_seeds = promise_me(soc_filter, socs)
                seeds = promise_me(recursive_reseeding, soc_filtered_seeds, pack_pledge, locked_read)
                original_seeds_list.append(seeds)
            else:
                original_seeds_list = None
        else:
            segments = promise_me(seeding_module, fm_index_pledge, locked_read)
            seeds = promise_me(extract_seeds, segments, fm_index_pledge, locked_read)
            original_seeds_list.append(seeds)
            if reseeding:
                recursive_reseeding = RecursiveReseedingSegments(params, pack)
                seeds = promise_me(recursive_reseeding, segments, pack_pledge, fm_index_pledge, locked_read)
        data.append(seeds)
    return compare(params, data, points_by_name, reads, pack_pledge, fm_index_pledge,
                   unlock_targets, render_one, original_seeds_list=original_seeds_list)

def compare_nw(params, reads_by_name, genome_section_by_name, points_by_name, pack, render_one=False):
    #params.by_name("Number of Threads").set(1)
    #params.by_name("Use all Processor Cores").set(False)
    splitter = NucSeqSplitter(params)
    lock = Lock(params)
    genome_section_by_name_pledge = Pledge()
    genome_section_by_name_pledge.set(genome_section_by_name)
    pack_pledge = Pledge()
    pack_pledge.set(pack)
    reads_vec = ContainerVectorNucSeq()
    for name, read in reads_by_name:
        reads_vec.append(read)
    reads_vec_pledge = Pledge()
    reads_vec_pledge.set(reads_vec)
    nw_alignment = NWAlignment(params)
    alignment_to_seeds = AlignmentToSeeds(params)
    read_by_name = GetReadByReadName(params)
    data = []
    reads = []
    unlock_targets = []
    read = promise_me(splitter, reads_vec_pledge)
    for _ in range(params.get_num_threads()):
        locked_read = promise_me(lock, read)
        unlock_targets.append(locked_read)
        reads.append(locked_read)
        genome_section_pl = promise_me(read_by_name, locked_read, genome_section_by_name_pledge)
        alignment_pl = promise_me(nw_alignment, locked_read, genome_section_pl)
        seeds_pl = promise_me(alignment_to_seeds, alignment_pl, pack_pledge)
        data.append(seeds_pl)
    return compare(params, data, points_by_name, reads, pack_pledge, None, unlock_targets, render_one,
                   ignore_genome_offset=True)

def compare_alignment(params, reads_by_name_pledge, points_by_name, alignments, pack_pledge, fm_index_pledge,
                      unlock_targets=None, render_one=False):
    align_to_seeds = AlignmentToSeeds(params)
    get_read_by_name = GetReadByName(params)
    data = []
    reads = []
    for idx in range(params.get_num_threads()):
        alignment_seeds = promise_me(align_to_seeds, alignments[idx], pack_pledge)
        data.append(alignment_seeds)
        read = promise_me(get_read_by_name, alignments[idx], reads_by_name_pledge)
        reads.append(read)
    return compare(params, data, points_by_name, reads, pack_pledge, fm_index_pledge,
                   unlock_targets, render_one)

def compare_alignment_from_file_queue(params, reads_by_name, points_by_name, pack, fm_index, queue_pledge,
                                      render_one=False):
    file_reader = SamFileReader(params)
    queue_picker = FilePicker(params)
    queue_placer = FileAlignmentPlacer(params)
    lock = Lock(params)
    reads_by_name_pledge = Pledge()
    reads_by_name_pledge.set(reads_by_name)
    pack_pledge = Pledge()
    pack_pledge.set(pack)
    fm_index_pledge = Pledge()
    fm_index_pledge.set(fm_index)
    alignments = []
    locked_files = []
    for _ in range(params.get_num_threads()):
        picked_file = promise_me(queue_picker, queue_pledge)
        locked_file = promise_me(lock, picked_file)
        alignment_ = promise_me(file_reader, locked_file, pack_pledge, reads_by_name_pledge)
        alignment = promise_me(queue_placer, alignment_, locked_file, queue_pledge)
        locked_files.append(locked_file)
        alignments.append(alignment)
    return compare_alignment(params, reads_by_name_pledge, points_by_name, alignments, pack_pledge, fm_index_pledge,
                             locked_files, render_one)

def compare_alignment_from_file_paths(params, reads_by_name, points_by_name, pack, fm_index, file_paths,
                                      render_one=False):
    if file_paths is None:
        return None
    file_queue = FileQueue()
    for string in file_paths:
        file_queue.add(FileStreamFromPath(string))
    queue_pledge = Pledge()
    queue_pledge.set(file_queue)
    return compare_alignment_from_file_queue(params, reads_by_name, points_by_name, pack, fm_index, queue_pledge,
                                             render_one)

def create_alignment(read_by_name, aligner, sam_name):
    reads_path = sv_hidden_to_aligners_data_dir + "/reads/" + sam_name + ".fasta"
    with open(reads_path, 'w') as fasta_file:
        for name, read in read_by_name:
            fasta_file.write(">" + name + "\n")
            fasta_file.write(str(read) + "\n")
    json_dict = {"reference_path": human_genome_dir}
    read_json = {"technology": "pb", "name": "n/a", "fasta_file": reads_path}
    path_sam = sv_hidden_to_aligners_data_dir + "/sam/" + sam_name + ".sam"
    aligner(read_json, path_sam, json_dict)
    return [path_sam]

def no_sv(genome_section, ref_start):
    seeds = Seeds()
    seeds.append(Seed(0, len(genome_section), ref_start, True))
...
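In this file, unlock_targets is not a method but a list of per-thread lock pledges: each worker thread locks the shared read splitter via Lock, the locked pledge is appended to unlock_targets, and compare() later chains an UnLock module after the thread's final stage so the lock is released once that stage has run. Reduced to its skeleton (the names are those of the MA/MSV bindings used above; this outline is not runnable without them):

# Skeleton of the per-thread lock/unlock wiring in the computation graph above.
# Lock, UnLock, promise_me, Pledge, VectorPledge come from the MA/MSV bindings.
unlock_targets = []
read = promise_me(splitter, reads_vec_pledge)        # shared input stream
for _ in range(params.get_num_threads()):
    locked_read = promise_me(lock, read)             # one lock pledge per thread
    unlock_targets.append(locked_read)               # remembered for UnLock later
    # ... per-thread seeding / alignment stages consume locked_read ...

# later, inside compare(): release each thread's lock after its last stage
res = VectorPledge()
for idx in range(params.get_num_threads()):
    empty = promise_me(collector, reads[idx], lumped_data)
    res.append(promise_me(UnLock(params, unlock_targets[idx]), empty))
res.simultaneous_get(params.get_num_threads())       # run all threads to completion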


supervisor.py

Source: supervisor.py (GitHub)

...
        log.error('Child exited with code %d', p.returncode)
    else:
        log.info('Success!')
    if 'targets' in job_config:
        unlock_targets(job_config)
    return p.returncode

def failure_is_reimage(failure_reason):
    if not failure_reason:
        return False
    reimage_failure = "Error reimaging machines:"
    if reimage_failure in failure_reason:
        return True
    else:
        return False

def check_for_reimage_failures_and_mark_down(targets, count=10):
    # Grab paddles history of jobs in the machine
    # and count the number of reimaging errors
    # if it fails N times then mark the machine down
    base_url = teuth_config.results_server
    for k, _ in targets.items():
        machine = k.split('@')[-1]
        url = urljoin(
            base_url,
            '/nodes/{0}/jobs/?count={1}'.format(machine, count)
        )
        resp = requests.get(url)
        jobs = resp.json()
        if len(jobs) < count:
            continue
        reimage_failures = list(filter(
            lambda j: failure_is_reimage(j['failure_reason']),
            jobs
        ))
        if len(reimage_failures) < count:
            continue
        # Mark machine down
        machine_name = shortname(k)
        teuthology.lock.ops.update_lock(
            machine_name,
            description='reimage failed {0} times'.format(count),
            status='down',
        )
        log.error(
            'Reimage failed {0} times ... marking machine down'.format(count)
        )

def reimage(job_config):
    # Reimage the targets specified in job config
    # and update their keys in config after reimaging
    ctx = create_fake_context(job_config)
    # change the status during the reimaging process
    report.try_push_job_info(ctx.config, dict(status='waiting'))
    targets = job_config['targets']
    try:
        reimaged = reimage_machines(ctx, targets, job_config['machine_type'])
    except Exception as e:
        log.exception('Reimaging error. Nuking machines...')
        # Reimage failures should map to the 'dead' status instead of 'fail'
        report.try_push_job_info(ctx.config, dict(status='dead', failure_reason='Error reimaging machines: ' + str(e)))
        nuke(ctx, True)
        # Machine that fails to reimage after 10 times will be marked down
        check_for_reimage_failures_and_mark_down(targets)
        raise
    ctx.config['targets'] = reimaged
    # change the status to running after the reimaging process
    report.try_push_job_info(ctx.config, dict(status='running'))

def unlock_targets(job_config):
    serializer = report.ResultsSerializer(teuth_config.archive_base)
    job_info = serializer.job_info(job_config['name'], job_config['job_id'])
    machine_statuses = query.get_statuses(job_info['targets'].keys())
    # only unlock/nuke targets if locked and description matches
    locked = []
    for status in machine_statuses:
        name = shortname(status['name'])
        description = status['description']
        if not status['locked']:
            continue
        if description != job_info['archive_path']:
            log.warning(
                "Was going to unlock %s but it was locked by another job: %s",
                name, description
...
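In teuthology's supervisor, unlock_targets only releases machines that are still locked and whose lock description matches this job's archive path, so one job can never free another job's machines. Here is that guard distilled into a standalone sketch; select_targets_to_unlock is a hypothetical helper, the statuses are plain dicts, and the actual unlock call is left out since the snippet above is truncated before it.

import logging

log = logging.getLogger(__name__)

def select_targets_to_unlock(machine_statuses, archive_path):
    # keep only machines that are locked AND were locked by this job:
    # the lock description stores the owning job's archive path
    to_unlock = []
    for status in machine_statuses:
        if not status['locked']:
            continue
        if status['description'] != archive_path:
            log.warning(
                "Was going to unlock %s but it was locked by another job: %s",
                status['name'], status['description'])
            continue
        to_unlock.append(status['name'])
    return to_unlock

statuses = [
    {'name': 'smithi001', 'locked': True,  'description': '/archive/job-1'},
    {'name': 'smithi002', 'locked': True,  'description': '/archive/job-2'},
    {'name': 'smithi003', 'locked': False, 'description': '/archive/job-1'},
]
print(select_targets_to_unlock(statuses, '/archive/job-1'))  # ['smithi001']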



