How to use before_start method in autotest

Best Python code snippet using autotest_python

standard.py

Source: standard.py (GitHub)


1"""2Standard manipulations that are found in many different genomic tools.3"""4# Built-in modules #5import sys6# Internal modules #7from gMiner.operations.genomic_manip import Manipulation as Manip8from gMiner import common9################################################################################10class merge(Manip):11 '''Merges features that are adjacent or overlapping in one stream'''12 def __init__(self):13 self.name = 'Internal merge'14 self.input_tracks = [{'type': 'track', 'name': 'X', 'kind': 'qualitative', 'fields': ['start','end', '...']}]15 self.input_constraints = []16 self.input_request = []17 self.input_special = [{'type': 'in_datatype', 'name': 'in_type'}]18 self.input_by_chrom = []19 self.output_tracks = [{'type': 'track', 'kind': 'qualitative', 'fields': {'same': 0}}]20 self.output_constraints = []21 self.output_other = []22 def chr_collapse(self, *args): return common.collapse.by_appending(*args)23 def quan(self, X):24 # Setup #25 for x in X: break26 if 'x' not in locals(): return27 # Core loop #28 for y in X:29 if y[0] == x[1]:30 x = list(x)31 l_x = x[1] - x[0]32 l_y = y[1] - y[0]33 x[2] = (l_x*x[2] + l_y*y[2]) / (l_x+l_y)34 x[1] = y[1]35 else:36 yield tuple(x)37 x = y38 # Last feature #39 yield tuple(x)40 def qual(self, X):41 # Setup #42 for x in X: break43 if 'x' not in locals(): return44 # Core loop #45 for y in X:46 if y[0] <= x[1]:47 x = list(x)48 x[1] = max(x[1], y[1])49 x[2] = x[2] + ' + ' + y[2]50 x[3] = x[3] + y[3]51 x[4] = x[4] == y[4] and x[4] or 052 else:53 yield tuple(x)54 x = y55 # Last feature #56 yield tuple(x)57#-------------------------------------------------------------------------------------------#58class filter(Manip):59 '''Computes the overlap of the first stream against the second stream60 returning only complete features from the first stream'''61 def __init__(self):62 self.name = 'Filter features'63 self.input_tracks = [{'type': 'track', 'name': 'X', 'kind': 'qualitative', 'fields': ['start','end','...']},64 {'type': 'track', 'name': 'Y', 'kind': 'qualitative', 'fields': ['start','end']}]65 self.input_constraints = ['ordered']66 self.input_request = []67 self.input_special = []68 self.input_by_chrom = []69 self.output_tracks = [{'type': 'track', 'kind': 'qualitative', 'fields': {'same': 0}}]70 self.output_constraints = []71 self.output_other = []72 def chr_collapse(self, *args): return common.collapse.by_intersection(*args)73 def __call__(self, X, Y):74 sentinel = (sys.maxint, sys.maxint)75 X = common.sentinelize(X, sentinel)76 Y = common.sentinelize(Y, sentinel)77 x = X.next()78 y = Y.next()79 if x == sentinel or y == sentinel: continue_loop = False80 else: continue_loop = True81 while continue_loop:82 open_window = y[0]83 close_window = y[1]84 # Extend the y window as long as possible #85 while True:86 if y == sentinel: break87 y = Y.next()88 if y[0] > close_window: break89 if y[1] > close_window: close_window = y[1]90 # Read features from X until overshooting the y window #91 while True:92 if x[0] >= close_window: break93 if x[1] > open_window: yield x94 x = X.next()95 if x == sentinel:96 continue_loop = False97 break98#-------------------------------------------------------------------------------------------#99class neighborhood(Manip):100 '''Given a stream of features and four integers `before_start`, `after_end`,101 `after_start` and `before_end`, this manipulation will output,102 for every feature in the input stream, one or two features103 in the neighboorhod of the orginal feature.104 * Only `before_start` and `after_end` are 
given::105 (start, end, ...) -> (start+before_start, end+after_end, ...)106 * Only `before_start` and `after_start` are given::107 (start, end, ...) -> (start+before_start, start+after_start, ...)108 * Only `after_end` and `before_end` are given::109 (start, end, ...) -> (end+before_end, end+after_end, ...)110 * If all four parameters are given, a pair of features is outputed::111 (start, end, ...) -> (start+before_start, start+after_start, ...)112 (end+before_end, end+after_end, ...)113 * If the boolean parameter `on_strand` is set to True,114 features on the negative strand are inverted as such::115 (start, end, ...) -> (start-after_end, start-before_end, ...)116 (end-after_start, end-before_start, ...)117 * If the inputed stream is quantitative, only `before_start` and `after_end`118 are taken into consideration and must be equal.'''119 def __init__(self):120 self.name = 'Neighborhood regions'121 self.input_tracks = [{'type': 'track', 'name': 'X', 'kind': 'any', 'fields': ['start','end', '...']}]122 self.input_constraints = []123 self.input_request = [{'type': int, 'key': 'before_start', 'name': 'before_start', 'default': None},124 {'type': int, 'key': 'after_end', 'name': 'after_end', 'default': None},125 {'type': int, 'key': 'after_start', 'name': 'after_start', 'default': None},126 {'type': int, 'key': 'before_end', 'name': 'before_end', 'default': None}]127 self.input_special = [{'type': 'in_datatype', 'name': 'in_type'}]128 self.input_by_chrom = [{'type': 'stop_val', 'name': 'stop_val'}]129 self.output_tracks = [{'type': 'track', 'kind': 'qualitative', 'fields': {'same': 0}}]130 self.output_constraints = []131 self.output_other = []132 def chr_collapse(self, *args): return common.collapse.by_appending(*args)133 def qual(self, stop_val, **kwargs):134 def generate(X, before_start=None, after_end=None, after_start=None, before_end=None, on_strand=False):135 if before_start and after_start:136 if before_start > after_start:137 raise Exception("'before_start' cannot be larger than 'after_start'")138 if before_end and after_end:139 if before_end > after_end:140 raise Exception("before_end cannot be larger than 'after_end'")141 if not on_strand:142 if before_start and after_end and after_start and before_end:143 for x in X:144 yield (x[0]+before_start, x[0]+after_start) + x[2:]145 yield (x[1]+before_end, x[1]+after_end) + x[2:]146 if before_start and after_start:147 for x in X: yield (x[0]+before_start, x[0]+after_start) + x[2:]148 if before_end and after_end:149 for x in X: yield (x[1]+before_end, x[1]+after_end) + x[2:]150 if before_start and after_end:151 for x in X: yield (x[0]+before_start, x[1]+after_end) + x[2:]152 else:153 if before_start and after_end and after_start and before_end:154 for x in X:155 yield (x[0]-after_end, x[0]-before_end) + x[2:]156 yield (x[1]-after_start, x[1]-before_start) + x[2:]157 if before_start and after_start:158 for x in X: yield (x[1]-after_start, x[1]-before_start) + x[2:]159 if after_end and before_end:160 for x in X: yield (x[0]-after_end, x[0]-before_end) + x[2:]161 if before_start and after_end:162 for x in X: yield (x[0]-after_end, x[1]-before_start) + x[2:]163 X = generate(**kwargs)164 from .basic import bounded165 for x in bounded()(X, 0, stop_val): yield x166 def quan(self, stop_val, **kwargs):167 def generate(X, before_start=None, after_end=None, after_start=None, before_end=None, on_strand=False):168 if on_strand:169 raise Exception("As the track is quantitative, you cannot specify 'on_strand=True'")170 if after_start:171 raise 
Exception("As the track is quantitative, you cannot specify a value for 'after_start'")172 if before_end:173 raise Exception("As the track is quantitative, you cannot specify a value for 'before_end'")174 if before_start != after_end:175 raise Exception("As the track is quantitative, 'before_start' and 'after_start' need to be equal")176 for x in X: yield (x[0]+before_start, x[1]+before_start) + x[2:]177 X = generate(**kwargs)178 from .basic import bounded179 for x in bounded()(X, 0, stop_val): yield x180#-----------------------------------#181# This code was written by the BBCF #182# http://bbcf.epfl.ch/ #183# webmaster.bbcf@epfl.ch #...


app.py

Source: app.py (GitHub)


# ## Step 2 - Climate App
# Now that you have completed your initial analysis, design a Flask API based on the queries that you have just developed.
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect, desc
from flask import Flask, jsonify
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt

engine = create_engine("sqlite:///sqlalchemy-challenge/Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
inspector = inspect(engine)
Measurement = Base.classes.measurement
m_columns = inspector.get_columns('measurement')
Station = Base.classes.station
s_columns = inspector.get_columns('station')
M = Measurement
S = Station

# * Use Flask to create your routes.
app = Flask(__name__)

# ### Routes
# * `/`
@app.route('/')
# * Home page.
def home_page():
    # * List all routes that are available.
    return (
        f"Welcome to my climate analysis homepage! <br/>"
        f"Available routes include: <br/>"
        f"/api/v1.0/precipitation <br/>"
        f"/api/v1.0/stations <br/>"
        f"/api/v1.0/tobs <br/>"
        f"/api/v1.0/<start> <br/>"
        f"/api/v1.0/<start>/<end>"
    )

@app.route("/api/v1.0/precipitation")
# * `/api/v1.0/precipitation`
def precipitation():
    session = Session(engine)
    percip = session.query(M.date, M.prcp).order_by(M.date).all()
    session.close()
    return jsonify(percip)
# * Convert the query results to a dictionary using `date` as the key and `prcp` as the value.
# * Return the JSON representation of your dictionary.

@app.route("/api/v1.0/stations")
# * `/api/v1.0/stations`
def stations():
    session = Session(engine)
    stations = session.query(S.name).all()
    session.close()
    # * Return a JSON list of stations from the dataset.
    return jsonify(stations)

@app.route("/api/v1.0/tobs")
# * `/api/v1.0/tobs`
def tobs():
    session = Session(engine)
    last_12_months = (dt.date(2017, 8, 23) - dt.timedelta(days=365))
    sel = [Measurement.date, Measurement.tobs]  # select temperature observations, not precipitation
    last_12_tobs = session.query(*sel).filter(Measurement.date >= last_12_months).group_by(Measurement.date).all()
    session.close()
    # * Return a JSON list of temperature observations (TOBS) for the previous year.
    return jsonify(last_12_tobs)

@app.route('/api/v1.0/<start>', endpoint='start')
# * `/api/v1.0/<start>`
def start(start):
    start = '2010-11-23'  # note: this overrides the <start> URL parameter with a fixed date

    session = Session(engine)
    # * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
    # * When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    before_start = pd.DataFrame(columns=('Date', 'Min Temp', 'Avg Temp', 'Max Temp'))
    before_start['Date'] = pd.date_range(start='2010-01-01', end=start)
    j = 0
    for i in before_start['Date'].astype(str):
        data = session.query(*sel).filter(func.strftime("%Y-%m-%d", Measurement.date) == i).all()
        before_start['Min Temp'][j] = data[0][0]
        before_start['Avg Temp'][j] = data[0][1]
        before_start['Max Temp'][j] = data[0][2]
        j += 1
    session.close()
    results = before_start.to_json(orient='index', date_format='iso')
    return jsonify(results)

# and `/api/v1.0/<start>/<end>`
@app.route('/api/v1.0/<start>/<end>', endpoint='start_end')
# * `/api/v1.0/<start>/<end>`
def start_end(start=None, end=None):
    session = Session(engine)
    # * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
    # * When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    before_start = pd.DataFrame(columns=('Date', 'Min Temp', 'Avg Temp', 'Max Temp'))
    before_start['Date'] = pd.date_range(start=start, end=end)
    j = 0
    for i in before_start['Date'].astype(str):
        data = session.query(*sel).filter(func.strftime("%Y-%m-%d", Measurement.date) == i).all()
        before_start['Min Temp'][j] = data[0][0]
        before_start['Avg Temp'][j] = data[0][1]
        before_start['Max Temp'][j] = data[0][2]
        j += 1
    session.close()
    results = before_start.to_json(orient='index', date_format='iso')
    return jsonify(results)

if __name__ == '__main__':...
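In the two range routes above, the `before_start` DataFrame is filled by issuing one query per date, which gets slow for long date ranges. Below is a minimal sketch of an alternative, assuming the same `engine` and `Measurement` model from the snippet; the `temp_stats` helper name is illustrative, not part of the project. It groups by date in a single query and lets the database compute TMIN/TAVG/TMAX.

from sqlalchemy import func
from sqlalchemy.orm import Session

def temp_stats(engine, Measurement, start, end=None):
    """Return per-date (TMIN, TAVG, TMAX) rows with one grouped query instead of a loop."""
    session = Session(engine)
    sel = [Measurement.date,
           func.min(Measurement.tobs),
           func.avg(Measurement.tobs),
           func.max(Measurement.tobs)]
    query = session.query(*sel).filter(Measurement.date >= start)
    if end is not None:
        query = query.filter(Measurement.date <= end)
    rows = query.group_by(Measurement.date).order_by(Measurement.date).all()
    session.close()
    return [{"Date": date, "Min Temp": tmin, "Avg Temp": tavg, "Max Temp": tmax}
            for date, tmin, tavg, tmax in rows]

A route handler could then simply return jsonify(temp_stats(engine, Measurement, start, end)) instead of building and serializing a DataFrame row by row.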


04_partition.py

Source: 04_partition.py (GitHub)


import linkedlist as ll

def partition(head, x):
    """
    Partition a linked list around a value x.
    All nodes less than x come before all nodes greater than or equal to x.
    Runtime
    O(n) O(n)
    """
    before_start = None
    before_end = None
    after_start = None
    after_end = None
    current = head
    while current is not None:
        next_node = current.next
        current.next = None
        if current.data < x:
            if before_start is None:
                before_start = current
                before_end = before_start
            else:
                before_end.next = current
                before_end = current
        else:
            if after_start is None:
                after_start = current
                after_end = after_start
            else:
                after_end.next = current
                after_end = current
        current = next_node  # advance via the saved reference; current.next was just cleared
    if before_start is None:
        return after_start
    before_end.next = after_start
    return before_start

if __name__ == "__main__":
    linked_list = ll.LinkedList()
    linked_list.append(3)
    linked_list.append(5)
    linked_list.append(8)
    linked_list.append(5)
    linked_list.append(10)
    linked_list.append(2)
    linked_list.append(1)
    x = 5
    linked_list.print_list()
    partition_list = partition(linked_list.head, x)...
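Here `before_start`/`before_end` and `after_start`/`after_end` are simply the head and tail pointers of two partial lists that are spliced together at the end. If you don't have the snippet's `linkedlist` module handy, the logic can be exercised with a throwaway node class; the `Node`, `build`, and `to_list` helpers below are illustrative, not part of that repository.

class Node:
    """Minimal singly linked list node, just enough to drive partition()."""
    def __init__(self, data):
        self.data = data
        self.next = None

def build(values):
    """Build a linked list from a Python list and return its head."""
    head = None
    for v in reversed(values):
        node = Node(v)
        node.next = head
        head = node
    return head

def to_list(head):
    """Collect node values back into a Python list for easy printing."""
    out = []
    while head is not None:
        out.append(head.data)
        head = head.next
    return out

head = build([3, 5, 8, 5, 10, 2, 1])
print(to_list(partition(head, 5)))
# Nodes < 5 keep their relative order and come first: [3, 2, 1, 5, 8, 5, 10]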


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
