How to use the seq2str method in Robotframework

Best Python code snippets using robotframework

generate.py

Source: generate.py (GitHub)


...
    '''`seq_len` indicates the length of the input number.'''
    r = [random.randint(0, 10**seq_len - 1) for _ in xrange(batch_size)]
    t = [2 * a for a in r]
    return mapstr(r, seq_len), mapstr(t, seq_len+1)

def seq2str(seq, sep=','):
    return sep.join(map(str, seq))

def getprioritySort(batch_size, seq_len, low=0, high=10, tostr=False):
    out = np.random.randint(low, high, (batch_size, seq_len))
    inp = [np.random.permutation(list(enumerate(l))).reshape(-1) for l in out]
    if tostr:
        return map(seq2str, inp), map(seq2str, out)
    else:
        return inp, out

def getinput(batch_size, seq_len, low=0, high=10, tostr=False, prepend_seqlen=False):
    '''samples a tensor of integers from `low` to `high`,
    with size (`batch_size`, `seq_len`).
    Return this tensor if not `tostr` else return a list of strings,
    with str_i being the comma-separated version of each row of tensor.'''
    tensor = np.random.randint(low, high, (batch_size, seq_len))
    if prepend_seqlen:
        tensor = np.append([[seq_len]]*batch_size, tensor, axis=1)
    if not tostr:
        return tensor
    else:
        return map(seq2str, tensor)

def getcopy(batched_in):
    return batched_in

def getrepeatCopy(batched_in, n):
    return np.tile(batched_in, n)

def getreverse(batch_in):
    return [s[::-1] for s in batch_in]

def getoddFirst(batch_in):
    return [list(s[::2]) + list(s[1::2]) for s in batch_in]

def _ngramflip(seq, n):
    return sum([list(seq[i+n-1:(i-1 if i > 0 else None):-1])
                for i in range(0, len(seq), n)],
               [])

def getbigramFlip(batch_in):
    '''`batch_in` should be a tensor of size (batch_size, seq_len)'''
    return [_ngramflip(row, n=2) for row in batch_in]

def getbatch(batch_size, seq_len, task, start=START, end=END, sep=SEP, alphabet=10,
             nrepeat=None):
    gettask = globals()['get' + task]
    def _format(batch_in, batch_out):
        return ','.join([
            ','.join([start] + [seq2str(in_)] + [sep] + [seq2str(out)] + [end])
            for in_, out in izip(batch_in, batch_out)])
    if task in PERM:
        batch_in = getinput(batch_size, seq_len, high=alphabet)
        batch_out = gettask(batch_in)
        return _format(batch_in, batch_out)
    elif task == 'prioritySort':
        batch_in, batch_out = gettask(batch_size, seq_len, high=alphabet)
        samples = []
        for b in range(batch_size):
            sample = []
            for i in range(seq_len):
                pri = batch_in[b][2*i]
                val = batch_in[b][2*i + 1]
                sample += [start] * pri + [str(val)]
            samples.append(','.join([start] + sample + [sep, seq2str(batch_out[b]), end]))
        return ','.join(samples)
    elif task == 'repeatCopy':
        batch_in = getinput(batch_size, seq_len, high=alphabet)
        assert nrepeat is not None, 'nrepeat should not be None'
        batch_out = getrepeatCopy(batch_in, nrepeat)
        return ','.join([
            ','.join([start] * (nrepeat+1) + [seq2str(in_)] + [sep] + [seq2str(out)] + [end])
            for in_, out in izip(batch_in, batch_out)])
    elif task in ARITH:
        batch_in, batch_out = gettask(batch_size, seq_len)
        return ''.join([start + sep.join([in_, out]) + end
                        for in_, out in izip(batch_in, batch_out)])
    else:
        raise Exception('unknown task')

def generate_set(batch_size, nbatch, min_len, max_len, task, alphabet=10, tofile=None,
                 nrepeat_low=None, nrepeat_high=None):
    '''`min_len` and `max_len` are inclusive bounds on the input size.
    `nrepeat_low` and `nrepeat_high` are inclusive bounds on repeatCopy repeat size.'''
    batches = []
    for _ in xrange(nbatch):
        seq_len = np.random.randint(min_len, max_len+1)
...
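In this snippet, seq2str is simply a separator-join over the string form of each element, which is why it works on plain lists and on NumPy rows alike. A minimal usage sketch (not part of the snippet; written for Python 3, where the snippet's map calls would need to be materialized into lists):

import numpy as np

def seq2str(seq, sep=','):
    # same helper as in generate.py: stringify each element, then join
    return sep.join(map(str, seq))

print(seq2str([3, 1, 4, 1, 5]))               # -> "3,1,4,1,5"
print(seq2str(np.array([7, 2, 9]), sep=' '))  # -> "7 2 9"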


arguments.py

Source: arguments.py (GitHub)


import argparse
import data_loader
import os
import subprocess

base_dir = "/projects/ml/TrRosetta/PDB30-20FEB17"
base_torch_dir = base_dir
if not os.path.exists(base_dir):
    # training on blue
    base_dir = "/gscratch/TrRosetta/"
    if os.path.exists("/scratch/torch/hhr"):
        base_torch_dir = "/scratch"
    else:
        base_torch_dir = base_dir

TRUNK_PARAMS = ['n_module', 'n_module_str', 'n_layer', 'd_msa', 'd_msa_full', 'd_pair', 'd_templ',
                'n_head_msa', 'n_head_pair', 'n_head_templ', 'd_hidden',
                'r_ff', 'n_resblock', 'p_drop']
SE3_PARAMS = ['num_layers', 'num_channels', 'num_degrees', 'n_heads', 'div',
              'l0_in_features', 'l0_out_features', 'l1_in_features', 'l1_out_features',
              'num_edge_features']

# absolute path to folder containing this file
script_dir = os.path.dirname(os.path.realpath(__file__))

def get_args():
    parser = argparse.ArgumentParser()
    # i/o parameters
    io_group = parser.add_argument_group("i/o parameters")
    io_group.add_argument('-indir', default='models/',
            help='input directory for loading model checkpoint')
    io_group.add_argument('-outdir', default='models/',
            help='output directory for dumping model checkpoints')
    io_group.add_argument('-dont_load_ckpt', default=False, action='store_true',
            help='If True, DO NOT Load pretrained checkpoint params into model?')
    io_group.add_argument('--verbose', '-v', default=False, action='store_true',
            help='If True, will print lots of stuff for debugging')
    # training parameters
    train_group = parser.add_argument_group("training parameters")
    train_group.add_argument("-model_name", default=None,
            help="model name for saving")
    train_group.add_argument('-batch_size', type=int, default=1,
            help="Batch size [1]")
    train_group.add_argument('-lr', type=float, default=2.0e-4,
            help="Learning rate [5.0e-4]")
    train_group.add_argument('-num_epochs', type=int, default=300,
            help="Number of epochs [300]")
    train_group.add_argument("-step_lr", type=int, default=300,
            help="Parameter for Step LR scheduler [300]")
    train_group.add_argument("-port", type=int, default=12319,
            help="PORT for ddp training, should be randomized [12319]")
    train_group.add_argument("-accum", type=int, default=1,
            help="Gradient accumulation when it's > 1 [1]")
    train_group.add_argument("-f_seq2str", type=float, default=0.5,
            help="Frequency of doing the seq-->str task vs the str-->seq task")
    train_group.add_argument("-fixbb_des_frac", type=float, default=1.01,
            help="Of the training examples that are fixed BB examples, what fraction\
            will be purely sequence design, rather than 'hal' task.")
    train_group.add_argument("-hal_mask_low", type=int, default=3,
            help='Smallest number of residues to mask out for a hal example')
    train_group.add_argument("-hal_mask_high", type=int, default=20,
            help='Largest number of residues to mask out for a hal example')
    train_group.add_argument("-mode", type=str, default='str2seq', choices=['str2seq', 'seq2str', 'mixed'],
            help="training mode for model")
    train_group.add_argument('-mask_low', type=float, default=0.,
            help='Lower bound for amino acid masking fraction')
    train_group.add_argument('-mask_high', type=float, default=1.,
            help='Upper bound for amino acid masking fraction')
    train_group.add_argument('-mask_structure', default=False, action='store_true',
            help='If True, mask out portions of structure during training/val')
    train_group.add_argument('-get_grad_norm', default=False, action='store_true',
            help='If True, spit out report of norms of gradients with respect to different loss terms')
    # data-loading parameters
    data_group = parser.add_argument_group("data loading parameters")
    data_group.add_argument('-val', default="%s/val_lists/xaa"%base_dir,
            help="Validation list [%s/val_lists/xaa]"%base_dir)
    data_group.add_argument('-maxseq', type=int, default=1000,
            help="Maximum depth of subsampled MSA [1000]")
    data_group.add_argument('-maxtoken', type=int, default=2**16,
            help="Maximum depth of subsampled MSA [2**16]")
    data_group.add_argument('-maxlat', type=int, default=50,
            help="Maximum depth of subsampled MSA [50]")
    data_group.add_argument("-lmin", type=int, default=100,
            help="Lower limit of crop size [100]")
    data_group.add_argument("-lmax", type=int, default=260,
            help="Upper limit of crop size [260]")
    data_group.add_argument("-rescut", type=float, default=3.5,
            help="Resolution cutoff [3.5]")
    data_group.add_argument("-slice", type=str, default="CONT",
            help="How to make crops [CONT (default) / DISCONT]")
    data_group.add_argument("-subsmp", type=str, default="LOG",
            help="How to subsample MSAs [UNI / LOG (default) / CONST]")
    data_group.add_argument('-mintplt', type=int, default=0,
            help="Minimum number of templates to select [0]")
    data_group.add_argument('-maxtplt', type=int, default=10,
            help="maximum number of templates to select [10]")
    data_group.add_argument('-seqid', type=float, default=150.0,
            help="maximum sequence identity cutoff for template selection [150.0]")
    # Trunk module properties
    trunk_group = parser.add_argument_group("Trunk module parameters")
    trunk_group.add_argument('-n_module', type=int, default=4,
            help="Number of iteration blocks without structure [4]")
    trunk_group.add_argument('-n_module_str', type=int, default=4,
            help="Number of iteration blocks with structure [4]")
    trunk_group.add_argument('-n_layer', type=int, default=1,
            help="Number of attention layer for each transformer encoder [1]")
    trunk_group.add_argument('-d_msa', type=int, default=384,
            help="Number of MSA features [384]")
    trunk_group.add_argument('-d_msa_full', type=int, default=64,
            help="Number of MSA features [64]")
    trunk_group.add_argument('-d_pair', type=int, default=288,
            help="Number of pair features [288]")
    trunk_group.add_argument('-d_templ', type=int, default=64,
            help="Number of templ features [64]")
    trunk_group.add_argument('-n_head_msa', type=int, default=12,
            help="Number of attention heads for MSA2MSA [12]")
    trunk_group.add_argument('-n_head_pair', type=int, default=8,
            help="Number of attention heads for Pair2Pair [8]")
    trunk_group.add_argument('-n_head_templ', type=int, default=4,
            help="Number of attention heads for template [4]")
    trunk_group.add_argument("-d_hidden", type=int, default=64,
            help="Number of hidden features for initial structure builder [64]")
    trunk_group.add_argument("-r_ff", type=int, default=4,
            help="ratio for feed-forward network in transformer encoder [4]")
    trunk_group.add_argument("-n_resblock", type=int, default=1,
            help="Number of residual blocks for MSA2Pair [1]")
    trunk_group.add_argument("-p_drop", type=float, default=0.1,
            help="Dropout ratio [0.1]")
    trunk_group.add_argument("-not_use_perf", action="store_true", default=False,
            help="Use performer or not [False]")
    # Structure module properties
    str_group = parser.add_argument_group("structure module parameters")
    str_group.add_argument('-num_layers', type=int, default=3,
            help="Number of equivariant layers in structure module block [3]")
    str_group.add_argument('-num_channels', type=int, default=32,
            help="Number of channels [32]")
    str_group.add_argument('-num_degrees', type=int, default=2,
            help="Number of degrees for SE(3) network [2]")
    str_group.add_argument('-l0_in_features', type=int, default=32,
            help="Number of type 0 input features [32]")
    str_group.add_argument('-l0_out_features', type=int, default=8,
            help="Number of type 0 output features [8]")
    str_group.add_argument('-l1_in_features', type=int, default=3,
            help="Number of type 1 input features [3]")
    str_group.add_argument('-l1_out_features', type=int, default=2,
            help="Number of type 1 output features [2]")
    str_group.add_argument('-num_edge_features', type=int, default=32,
            help="Number of edge features [32]")
    str_group.add_argument('-n_heads', type=int, default=4,
            help="Number of attention heads for SE3-Transformer [4]")
    str_group.add_argument("-div", type=int, default=4,
            help="Div parameter for SE3-Transformer [4]")
    # Loss function parameters
    loss_group = parser.add_argument_group("loss parameters")
    loss_group.add_argument('-w_dist', type=float, default=1.0,
            help="Weight on distd in loss function [1.0]")
    loss_group.add_argument('-w_str', type=float, default=1.0,
            help="Weight on strd in loss function [1.0]")
    loss_group.add_argument('-w_rms', type=float, default=1.0,
            help="Weight on rmsd in loss function [1.0]")
    loss_group.add_argument('-w_lddt', type=float, default=1.0,
            help="Weight on predicted lddt loss [1.0]")
    loss_group.add_argument('-w_blen', type=float, default=0.1,
            help="Weight on predicted blen loss [0.1]")
    loss_group.add_argument('-w_bang', type=float, default=0.1,
            help="Weight on predicted bang loss [0.1]")
    loss_group.add_argument('-w_aa', type=float, default=1.0,
            help="Weight on masked language task")
    # parse arguments
    args = parser.parse_args()
    # Setup dataloader parameters:
    loader_param = data_loader.set_data_loader_params(args)
    # make dictionary for each parameters
    trunk_param = {}
    for param in TRUNK_PARAMS:
        trunk_param[param] = getattr(args, param)
    if not args.not_use_perf:
        trunk_param["performer_N_opts"] = {"nb_features": 64, "feature_redraw_interval": 10000}
        trunk_param["performer_L_opts"] = {"nb_features": 64, "feature_redraw_interval": 10000}
    SE3_param = {}
    for param in SE3_PARAMS:
        if hasattr(args, param):
            SE3_param[param] = getattr(args, param)
    trunk_param['SE3_param'] = SE3_param

    # loss function parameters
    loss_param = {}
    seq2str_param = {}
    str2seq_param = {}

    # take in seq2str loss params from cmd line
    for param in ['w_dist', 'w_str', 'w_rms', 'w_lddt', 'w_blen', 'w_bang', 'w_aa']:
        seq2str_param[param] = getattr(args, param)

    # take care of str2seq params here
    #str2seq_param = {
    #    'w_aa' : 1.0,
    #    'w_str' : 0.5,
    #    'w_bang': 0.05,
    #    'w_blen': 0.05,
    #    'w_dist': 0.05,
    #    'w_lddt': 0.05,
    #    'w_rms' : 0.05,
    #    }

    # TODO: Make single set of params
    mixed_param = {
        'w_aa' : args.w_aa,
        'w_str' : args.w_str,
        'w_bang': args.w_bang,
        'w_blen': args.w_blen,
        'w_dist': args.w_dist,
        'w_lddt': args.w_lddt,
        'w_rms' : 0.05, # IGNORE
        }
    # put both into loss param
    #loss_param['str2seq'] = str2seq_param
    #loss_param['seq2str'] = seq2str_param
    loss_param['str2seq'] = mixed_param
    loss_param['seq2str'] = mixed_param
    # add git hash of current commit
    args.commit = subprocess.check_output(f'git --git-dir {script_dir}/../../.git rev-parse HEAD',
            shell=True).decode().strip()
    with open(os.path.join(args.outdir, 'args.txt'), 'w') as fp:
        fp.write('Arguments from training session\n')
        for key,val in args.__dict__.items():
            fp.write(': '.join([str(key), str(val)])+'\n')
...
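Here "seq2str" names a training task rather than a string helper: -mode selects seq2str, str2seq, or mixed, and -f_seq2str sets how often the seq-->str task is drawn, with loss_param['seq2str'] / loss_param['str2seq'] holding the matching loss weights. The training loop itself is not part of this snippet, so the following pick_task helper is only a hypothetical sketch of how those two arguments might be consumed:

import random

def pick_task(mode, f_seq2str):
    # Hypothetical: in 'mixed' mode, draw seq2str with probability f_seq2str;
    # otherwise the mode already names the single task to run.
    if mode != 'mixed':
        return mode
    return 'seq2str' if random.random() < f_seq2str else 'str2seq'

# With f_seq2str=0.5, roughly half the examples would train seq-->str.
task = pick_task('mixed', 0.5)
# weights = loss_param[task]   # loss_param as built in get_args() above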


reserver.py

Source: reserver.py (GitHub)


...
            contentType = "text/plain;charset=utf-8"

        self.sendResponseWithOutput(response, contentType, out)

    def _seq2str(self, seq):
        """
        converts sequence of strings or tuples to string to be returned
        to AJAX call
        """
        out = u''
        for toplevel in seq:
            if toplevel is not None:
                if type(toplevel) in types.StringTypes:
                    toplevel = (toplevel, )
                out += u', '.join(
                    ["'%s'" % x.replace("'", "\\'") for x in toplevel])
                out += '\n'
        return out

    def _dict2str(self, d):
        """
        converts dict to string to be returned to AJAX call
        """
        return u''.join([u"%s: '%s'\n" % (k, v) for k, v in d.items()])

    def do_re(self, *args, **params):
        """
        return self._dict2str(params) + '\n' + self._seq2str(params.items())
        """
        txt = unicode(params['txt'], 'utf-8')
        regex = unicode(params['regex'], 'utf-8')
        method = params['method']
        options = re.UNICODE

        optionList = (
            ('dotall', re.DOTALL),
            ('ignorecase', re.IGNORECASE),
            ('multiline', re.MULTILINE),
            ('verbose', re.VERBOSE))

        for s, bitflag in optionList:
            if params.get(s, '') == 'true':
                options |= bitflag

        try:
            cre = re.compile(regex, options)
        except Exception, e:
            out = u'ERROR: %s' % e
        else:
            results = getattr(cre, method)(txt)

            if results:
                # list result
                if type(results) == types.ListType:
                    out = self._seq2str(results)
                # match object
                else:
                    out = u'[TEXT MATCHES]\n'
                    ng = self._dict2str(results.groupdict())
                    if ng:
                        out += u'### NAMED GROUPS ###\n%s' % ng
                    g = self._seq2str(results.groups())
                    if g:
                        out += u'### GROUPS ###\n%s' % g
            else:
                out = u'[NO MATCH OR NOTHING FOUND]'
        return out

    def do_POST(self):
        """
        test a re
        """
        length = int(self.headers.getheader('content-length'))
        qs = self.rfile.read(length)
        params = dict(cgi.parse_qsl(qs, keep_blank_values=1))
        out = self.do_re(**params)
...
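This _seq2str turns a sequence of strings or tuples into a quoted, comma-separated listing for an AJAX response, escaping single quotes and emitting one line per entry. The snippet is Python 2 (types.StringTypes, unicode, except Exception, e), so here is a small standalone sketch of the same idea written for Python 3; the function name and example inputs are mine, not part of the original file:

def seq2str(seq):
    # Python 3 restatement of _seq2str: quote items, escape single quotes,
    # one (possibly multi-item) line per sequence entry, skip None.
    out = ''
    for toplevel in seq:
        if toplevel is None:
            continue
        if isinstance(toplevel, str):
            toplevel = (toplevel,)
        out += ', '.join("'%s'" % x.replace("'", "\\'") for x in toplevel)
        out += '\n'
    return out

print(seq2str(["it's", ("a", "b")]))
# 'it\'s'
# 'a', 'b'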


distance.py

Source: distance.py (GitHub)


1"""2Distance module3Find distance between sequences4Written by Marshall Beddoe <mbeddoe@baselineresearch.net>5Copyright (c) 2004 Baseline Research6Licensed under the LGPL7"""8#9# Note: Gaps are denoted by the integer value 256 as to avoid '_' problems10#11import align, zlib12from Numeric import *13__all__ = [ "Distance", "Entropic", "PairwiseIdentity", "LocalAlignment" ]14class Distance:15 """Implementation of classify base class"""16 def __init__(self, sequences):17 self.sequences = sequences18 self.N = len(sequences)19 # NxN Distance matrix20 self.dmx = zeros((self.N, self.N), Float)21 for i in range(len(sequences)):22 for j in range(len(sequences)):23 self.dmx[i][j] = -124 self._go()25 def __repr__(self):26 return "%s" % self.dmx27 def __getitem__(self, i):28 return self.dmx[i]29 def __len__(self):30 return len(self.dmx)31 def _go(self):32 """Perform distance calculations"""33 pass34class Entropic(Distance):35 """Distance calculation based off compression ratios"""36 def _go(self):37 # Similarity matrix38 similar = zeros((self.N, self.N), Float)39 for i in range(self.N):40 for j in range(self.N):41 similar[i][j] = -142 #43 # Do compression ratio calculations44 #45 for i in range(self.N):46 for j in range(self.N):47 if similar[i][j] >= 0:48 continue49 seq1 = self.sequences[i][1]50 seq2 = self.sequences[j][1]51 # Convert sequences to strings, gaps denoted by '_'52 seq1str = ""53 for x in seq1:54 if x == 256:55 seq1str += '_'56 else:57 seq1str += chr(x)58 seq2str = ""59 for x in seq2:60 if x == 256:61 seq2str += '_'62 else:63 seq2str += chr(x)64 comp1 = zlib.compress(seq1str)65 comp2 = zlib.compress(seq2str)66 if len(comp1) > len(comp2):67 score = len(comp2) * 1.0 / len(comp1) * 1.068 else:69 score = len(comp1) * 1.0 / len(comp2) * 1.070 similar[i][j] = similar[j][i] = score71 #72 # Distance matrix73 #74 for i in range(self.N):75 for j in range(self.N):76 self.dmx[i][j] = similar[i][i] - similar[i][j]77class PairwiseIdentity(Distance):78 """Distance through basic pairwise similarity"""79 def _go(self):80 # Similarity matrix81 similar = zeros((self.N, self.N), Float)82 for i in range(self.N):83 for j in range(self.N):84 similar[i][j] = -185 #86 # Find pairs87 #88 for i in range(self.N):89 for j in range(self.N):90 if similar[i][j] >= 0:91 continue92 seq1 = self.sequences[i][1]93 seq2 = self.sequences[j][1]94 minlen = min(len(seq1), len(seq2))95 len1 = len2 = idents = 096 for x in range(minlen):97 if seq1[x] != 256:98 len1 += 1.099 if seq1[x] == seq2[x]:100 idents += 1.0101 if seq2[x] != 256:102 len2 += 1.0103 m = max(len1, len2)104 similar[i][j] = idents / m105 #106 # Distance matrix107 #108 for i in range(self.N):109 for j in range(self.N):110 self.dmx[i][j] = similar[i][i] - similar[i][j]111class LocalAlignment(Distance):112 """Distance through local alignment similarity"""113 def __init__(self, sequences, smx=None):114 self.smx = smx115 # If similarity matrix is None, make a quick identity matrix116 if self.smx == None:117 self.smx = zeros((257, 257), Float)118 for i in range(257):119 for j in range(257):120 if i == j:121 self.smx[i][j] = 1.0122 else:123 self.smx[i][j] = 0.0124 Distance.__init__(self, sequences)125 def _go(self):126 # Similarity matrix127 similar = zeros((self.N, self.N), Float)128 for i in range(self.N):129 for j in range(self.N):130 similar[i][j] = -1131 #132 # Compute similarity matrix of SW scores133 #134 for i in range(self.N):135 for j in range(self.N):136 if similar[i][j] >= 0:137 continue138 seq1 = self.sequences[i][1]139 seq2 = self.sequences[j][1]140 (nseq1, 
nseq2, edits1, edits2, score, gaps) = \141 align.SmithWaterman(seq1, seq2, self.smx, 0, 0)142 similar[i][j] = similar[j][i] = score143 #144 # Compute distance matrix of SW scores145 #146 for i in range(self.N):147 for j in range(self.N):148 if self.dmx[i][j] >= 0:149 continue150 self.dmx[i][j] = 1 - (similar[i][j] / similar[i][i])...
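The Entropic class builds its seq1str/seq2str variables by mapping each integer in a sequence to a character, with the gap marker 256 rendered as '_', and then scores pairs by the ratio of their zlib-compressed lengths. A small sketch of that conversion and score, written for Python 3 (where zlib.compress needs bytes) rather than the Python 2 / Numeric environment of the original; the sample sequences are illustrative only:

import zlib

GAP = 256  # same gap marker as distance.py

def to_text(seq):
    # gap positions become '_', everything else becomes its character
    return ''.join('_' if x == GAP else chr(x) for x in seq)

seq1 = [71, 69, 84, GAP, 47]   # renders as "GET_/"
seq2 = [71, 69, 84, 32, 47]    # renders as "GET /"
c1 = zlib.compress(to_text(seq1).encode('latin-1'))
c2 = zlib.compress(to_text(seq2).encode('latin-1'))
score = min(len(c1), len(c2)) / max(len(c1), len(c2))  # compression-ratio similarity
print(to_text(seq1), to_text(seq2), score)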


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides that help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Robotframework automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 minutes of automation testing for free!

