How to use url_download method in avocado

Best Python code snippet using avocado_python

api_feeder.py

Source:api_feeder.py Github

copy

Full Screen

# api_feeder.py — connects to the Zello API and downloads message media.
# Zello API reference: https://zellowork.com/api.htm#auth_section
#
# Takes as INPUT:
#   - API key
#   - md5(Zello admin account password)
# Calls zello_api_connect.py to get the API tokens (token & sid).
# Calls password_hasher.py to build the API connection password:
#   md5(md5(Zello admin account password) + {token} + {API key})
# Then: logs in to the Zello API, GETs message metadata, and downloads the
# media file referenced by a media key.
# (Selecting only *new* messages and discovering the media key automatically
# are still TODO.)
import json
# Fixed: `urlencode` lives in urllib.parse on Python 3
# (the original `from urllib import urlencode` only works on Python 2).
from urllib.parse import urlencode
import urllib.request
import wget
import requests

import zello_api_connect
from password_hasher import hash_md5
from print_request import print_r

### -----------------------------
### INPUT variables
# SECURITY NOTE(review): API key and password hash are hard-coded in source —
# move them to environment variables or a secrets store before committing.
API_key = "RZCLSHHV4TE30WCMLUIWXI7HOM1HD8WV"
Zello_admin_password_md5 = "4cd60c519c8c507aaf3426b6323c0624"
url = "https://kraaft.zellowork.com"
url_login = "https://kraaft.zellowork.com/user/login"
url_gettoken = "https://kraaft.zellowork.com/user/gettoken"
headers = {
    'Content-Type': "application/x-www-form-urlencoded",
    'cache-control': "no-cache"
}
nbr_msg_to_check = "max=1"

### -----------------------------
### GETS TOKENS
gettoken_response = zello_api_connect.gettoken_zello(url=url_gettoken, headers=headers)
token = gettoken_response["token"]
sid = gettoken_response["sid"]
print("token : " + token)
print("sid : " + sid)
print(" ")

### -----------------------------
### GETS md5(md5(Zello admin account password) + {token} + {API Key})
pre_h_pswd = Zello_admin_password_md5 + token + API_key
print("Pre md5 hash password : " + pre_h_pswd)
h_pswd = hash_md5(pre_h_pswd)
print("md5(md5(Zello admin account password) + token + API Key) = " + h_pswd)

### -----------------------------
### DEFINE THE LOGIN REQUEST
# Payload for the login POST request.
payload = "username=Admin&password=" + h_pswd
print("payload : " + payload)
# The session id must be passed as a query-string parameter on every call.
querystring = {"sid": sid}
response = requests.request("POST", url_login, data=payload, headers=headers, params=querystring)
# Print the request object for dev purposes.
print_r("POST", url_login, data=payload, headers=headers, params=querystring)

### -----------------------------
### DEFINE THE GET METADATA REQUEST
# Request to get the last history metadata message.
# NOTE(review): the `max=1` parameter appears to be ignored by the server
# (100 messages come back), so the first element of the list must be
# selected client-side.
url_metada = url + "/history/getmetadata"
request_metadata = requests.request("POST", url_metada, data=nbr_msg_to_check, params=querystring)
# Print the request for dev purposes.
print_r("POST", url_metada, data=nbr_msg_to_check, headers={}, params=querystring)
print("request_metada : " + request_metadata.text)

### -----------------------------
### DOWNLOADS A DEFINED OBJECT
# Test with the media key of the last Zello message.
media_key = "1901d582e2e467671680d73dfb4d5944ac6ba959240412e718f424154fa03c9d"
url_media = url + "/history/getmedia/key/" + media_key
request_media = requests.request("GET", url_media, data="", headers={}, params=querystring)
print_r("GET", url_media, data="", headers={}, params=querystring)
print("request media :" + request_media.text)  # Prints the server response

## Extract the URL containing the file from the request body.
# Transform the JSON response into a dictionary and extract the "url" field.
request_media_dict = json.loads(request_media.text)
url_download = request_media_dict["url"]
print("url_download = " + url_download)
print("Beginning the download of the file...")
# TODO(review): the original experiments below never worked because of a typo
# (`urletrieve`). The correct call is spelled `urlretrieve`; supply a real
# destination path before enabling:
# urllib.request.urlretrieve(url_download, "downloaded_media")

Full Screen

Full Screen

zippyshare_downloader.py

Source:zippyshare_downloader.py Github

copy

Full Screen

# zippyshare_downloader.py — resolves a Zippyshare download link and opens
# it in the browser. Zippyshare obfuscates the 'dlbutton' href with inline
# JavaScript arithmetic; this script re-implements the three known page
# variants of that arithmetic in Python.
import os
import re
import sys
import math

try:
    import click
    import requests
    from bs4 import BeautifulSoup as bs
except ImportError:
    # Fixed: use sys.exit() — the `exit()` builtin is injected by the `site`
    # module and is not guaranteed to exist in every interpreter.
    sys.exit("- import error, you need install module first !")

# One shared HTTP session so cookies persist across requests.
r = requests.Session()


def download(url):
    """Fetch *url*, rebuild the obfuscated 'dlbutton' href, and launch it.

    Exits the process with a message when the page cannot be parsed or the
    download request fails.
    """
    try:
        req = r.get(url)
        origin = re.search('https://(.*?)/', url).group(1)
        if "(Math.pow(a, 3)+b)" in req.text:
            # Variant 1: href = prefix + (a**3 + b) + suffix, with `a`
            # defined in one of the page's <script> tags.
            var_a = None
            # Fixed: the original wrote `for script in script:`, shadowing
            # the list with its own loop variable.
            for tag in bs(req.text, 'html.parser').findAll('script'):
                if '(Math.pow(a, 3)+b)' in str(tag):
                    var_a = re.search('var a = (.*?);', str(tag)).group(1)
                    break
            if var_a is None:
                # Fixed: the original crashed with NameError/UnboundLocalError
                # when no script tag matched.
                raise AttributeError("dlbutton script not found")
            var_b = 3
            middle_math = int(math.pow(int(var_a), 3) + var_b)
            elemen = re.search(
                'document.getElementById\(\'dlbutton\'\).href = \"(.*?)\"\+(.*?)\+"(.*?)\";',
                req.text)
            url_download = "https://" + origin + elemen.group(1) + str(middle_math) + elemen.group(3)
        elif 'var a = function() {return 1};' in req.text:
            # Variant 2: the page defines a/b/c as chained functions and `d`
            # as twice the class attribute of <span id="omg">.
            a = lambda: 1
            b = lambda: a() + 1
            c = lambda: b() + 1
            d = re.search('<span id="omg" class="(.*?)"', req.text).group(1)
            d = int(d) * 2
            elemen = re.search(
                'document.getElementById\(\'dlbutton\'\).href\ = \"(.*?)\"\+\((.*?) '
                '\+ a\(\) \+ b\(\) \+ c\(\) \+ d \+ (.*?)\)\+"(.*?)"',
                req.text)
            # SECURITY NOTE(review): eval() runs arithmetic scraped from a
            # remote page — a hostile page could execute arbitrary code.
            # Replace with a safe arithmetic parser (e.g. ast.literal_eval
            # on validated input) before production use.
            first = int(eval(elemen.group(2)) + a() + b() + c() + d + eval(elemen.group(3)))
            url_download = 'https://' + origin + elemen.group(1) + str(first) + elemen.group(4)
        else:
            # Variant 3: href = prefix + (simple arithmetic) + suffix.
            elemen = re.search(
                'document.getElementById\(\'dlbutton\'\).href = \"(.*?)\" \+ \((.*?)\) \+ \"(.*?)\";',
                req.text)
            # SECURITY NOTE(review): same eval() concern as above.
            url_download = "https://" + origin + elemen.group(1) + str(eval(elemen.group(2))) + elemen.group(3)
        print("- redirect to browser !")
        click.launch(url_download)
    except ValueError:
        sys.exit(f"- failed download from {url}")
    except AttributeError:
        # A failed re.search returns None -> .group() raises AttributeError.
        sys.exit("- failed download file not found !")


def main():
    """Read the Zippyshare URL from argv (or prompt for it) and download."""
    if len(sys.argv) < 2:
        print()
        print("- input url zippyshare !")
        url_download = input('- url : ')
    else:
        url_download = sys.argv[1]
    download(url_download)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass

Full Screen

Full Screen

yahoo_stock.py

Source:yahoo_stock.py Github

copy

Full Screen

# yahoo_stock.py — downloads historical AAPL quotes from Yahoo Finance as
# CSV by scraping the page's download link and rewriting its date range,
# then loads the CSV into a pandas DataFrame.
import dryscrape
from bs4 import BeautifulSoup
import time
import datetime
import re

# Visit the main page once to initialise the session and cookies. This call
# is slow(er); after it, multiple downloads work, though there seems to be a
# request limit after which the session must be reinitialised.
session = dryscrape.Session()
session.set_attribute('auto_load_images', False)
session.set_header('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')
session.visit("https://finance.yahoo.com/quote/AAPL/history?p=AAPL")
response = session.body()

# Locate the CSV download link (the anchor carrying a `download` attribute).
soup = BeautifulSoup(response, 'lxml')
url_download = None
for taga in soup.findAll('a'):
    if taga.has_attr('download'):
        url_download = taga['href']
if url_download is None:
    # Fixed: the original crashed with NameError further down when the page
    # contained no download link (e.g. after a Yahoo layout change).
    raise RuntimeError("no download link found on the Yahoo history page")
print(url_download)

# Replace the default start/end dates Yahoo provides with our own range
# (epoch seconds, as the URL's period1/period2 parameters expect).
s = "2007-05-18"
period1 = '%.0f' % time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d").timetuple())
e = "2017-05-18"
period2 = '%.0f' % time.mktime(datetime.datetime.strptime(e, "%Y-%m-%d").timetuple())

# Swap the periods in the download URL for our dates.
m = re.search('period1=(.+?)&', url_download)
if m:
    to_replace = m.group(m.lastindex)
    url_download = url_download.replace(to_replace, period1)
m = re.search('period2=(.+?)&', url_download)
if m:
    to_replace = m.group(m.lastindex)
    url_download = url_download.replace(to_replace, period2)

# Visit the rewritten URL; the response body is the CSV payload.
session.visit(url_download)
csv_data = session.body()

# Finally, load the CSV into a DataFrame (first column as a datetime index).
import sys
if sys.version_info[0] < 3:
    from StringIO import StringIO
else:
    from io import StringIO
import pandas as pd
df = pd.read_csv(StringIO(csv_data), index_col=[0], parse_dates=True)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful