How to use the new_entry method in hypothesis

Best Python code snippets using hypothesis
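All three snippets below use new_entry as an ordinary local name, a plain dict, an OrderedDict, or a Django model instance, built one record at a time and collected into a list or saved directly. A minimal sketch of that shared pattern (the sample records and field names here are invented for illustration):

# Build one new_entry mapping per record, then collect the entries.
records = [("Alpha", "01/02/2012"), ("Beta", "03/04/2012")]  # invented sample data

all_entries = []
for name, date_raw in records:
    new_entry = {}
    new_entry['name'] = name
    new_entry['date'] = date_raw
    all_entries.append(new_entry)

print(all_entries)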

fed_meeting_log_v1.py

Source: fed_meeting_log_v1.py (GitHub)


import scraperwiki
from bs4 import BeautifulSoup
from datetime import datetime

'''
03/30/2012
Scraper for Fed Meeting Logs.
Fed top-level links:
http://www.federalreserve.gov/newsevents/reform_systemic.htm
http://www.federalreserve.gov/newsevents/reform_derivatives.htm
http://www.federalreserve.gov/newsevents/reform_interchange.htm
http://www.federalreserve.gov/newsevents/reform_payments.htm
http://www.federalreserve.gov/newsevents/reform_consumer.htm
http://www.federalreserve.gov/newsevents/reform_resolution.htm
'''

BASE = "http://www.federalreserve.gov"

# Dictionary of category: url pairs.
FED_MEETINGS = {
    "Systemic Designations, Enhanced Prudential Standards, and Banking Supervision and Regulation": "/newsevents/reform_systemic.htm",
    "Derivatives Markets and Products": "/newsevents/reform_derivatives.htm",
    "Interchange Fees": "/newsevents/reform_interchange.htm",
    "Payments, Settlement and Clearing Activities and Utilities": "/newsevents/reform_payments.htm",
    "Consumer Financial Protection": "/newsevents/reform_consumer.htm",
    "Resolution Framework": "/newsevents/reform_resolution.htm",
}


def main():
    for category, url in FED_MEETINGS.items():
        scrape_page(BASE + url, category)


def scrape_page(url, category):
    '''
    Scrapes a single page of the Fed meeting logs.
    url: the page url
    category: string for the category of all meetings on the page.
    '''
    raw_page = scraperwiki.scrape(url)
    page_soup = BeautifulSoup(raw_page, "html.parser")
    table = page_soup.find('table', {"class": "earegulate"})

    # Find the table and build an entry for each row.
    all_entries = []
    all_rows = table.find_all("tr")
    subcategory = ""
    for row in all_rows:
        new_entry = {}
        cols = row.find_all('td')
        if len(cols) != 3:
            # Header rows carry the subcategory; skip "Attachment" headers.
            header = row.find('th')
            if "Attachment" not in header.get_text():
                subcategory = header.get_text()
            continue

        new_entry['category'] = category
        new_entry['subcategory'] = subcategory
        new_entry['fullname'] = cols[0].get_text()
        new_entry['name'] = new_entry['fullname'].replace("(PDF)", "").replace("Meeting", "").strip()
        new_entry['link'] = cols[0].find('a').get('href')
        date_raw = cols[1].get_text().strip()
        try:
            date = datetime.strptime(date_raw, "%m/%d/%Y").date()
        except ValueError:
            # Date ranges like "03/05/2012-03/06/2012": keep the first day.
            date = datetime.strptime(date_raw.split('-')[0], "%m/%d/%Y").date()
            print(new_entry['name'] + ": " + date_raw)
        new_entry['date'] = date
        new_entry['type'] = cols[2].get_text()

        # new_entry['key'] = new_entry['name'] + ": " + date_raw
        all_entries.append(new_entry)

    # Update database:
    scraperwiki.sqlite.save(['category', 'name', 'date'], all_entries, table_name="MeetingTable1")


main()
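scrape_page depends on scraperwiki and a live Fed page. A minimal sketch of the same row-to-new_entry parsing, run against an inline HTML table instead; the table content below is invented, and only the "earegulate" class and the three-column row layout come from the scraper above:

from bs4 import BeautifulSoup
from datetime import datetime

# Hypothetical miniature of the Fed meetings table.
HTML = '''
<table class="earegulate">
  <tr><th>Derivatives</th></tr>
  <tr>
    <td><a href="/doc.pdf">Meeting with Example Corp (PDF)</a></td>
    <td>03/30/2012</td>
    <td>Meeting</td>
  </tr>
</table>
'''

soup = BeautifulSoup(HTML, "html.parser")
subcategory = ""
for row in soup.find("table", {"class": "earegulate"}).find_all("tr"):
    cols = row.find_all("td")
    if len(cols) != 3:
        # Header rows carry the subcategory, as in the scraper above.
        subcategory = row.find("th").get_text()
        continue
    new_entry = {
        "subcategory": subcategory,
        "fullname": cols[0].get_text(),
        "link": cols[0].find("a").get("href"),
        "date": datetime.strptime(cols[1].get_text().strip(), "%m/%d/%Y").date(),
        "type": cols[2].get_text(),
    }
    print(new_entry)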


_transformers.py

Source: _transformers.py (GitHub)


# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from azure.cli.core.util import CLIError


def table_transform_output(result):
    table_result = []
    for key in ('host', 'username', 'password', 'location', 'skuname', 'resource group', 'id', 'version', 'connection string'):
        entry = OrderedDict()
        entry['Property'] = key
        entry['Value'] = result[key]
        table_result.append(entry)
    return table_result


def table_transform_output_list_servers(result):
    table_result = []
    if not result:
        return table_result
    for key in result:
        new_entry = OrderedDict()
        new_entry['Name'] = key['name']
        new_entry['Resource Group'] = key['resourceGroup']
        new_entry['Location'] = key['location']
        new_entry['Version'] = key['version']
        new_entry['Storage Size(GiB)'] = int(key['storageProfile']['storageMb']) / 1024.0
        new_entry['Tier'] = key['sku']['tier']
        new_entry['SKU'] = key['sku']['name']
        if 'flexibleServers' in result[0]['id']:
            new_entry['State'] = key['state']
            new_entry['HA State'] = key['haState']
            new_entry['Availability zone'] = key['availabilityZone']
        table_result.append(new_entry)
    return table_result


def table_transform_output_list_skus(result):
    table_result = []
    if len(result) > 1:
        skus_tiers = result[0]["supportedFlexibleServerEditions"]
        for skus in skus_tiers:
            tier_name = skus["name"]
            try:
                keys = skus["supportedServerVersions"][0]["supportedVcores"]
                for key in keys:
                    new_entry = OrderedDict()
                    new_entry['SKU'] = key['name']
                    new_entry['Tier'] = tier_name
                    new_entry['vCore'] = key['vCores']
                    new_entry['Memory'] = str(int(key['supportedMemoryPerVcoreMb']) * int(key['vCores']) // 1024) + " GiB"
                    new_entry['Max Disk IOPS'] = key['supportedIOPS']
                    table_result.append(new_entry)
            except (KeyError, IndexError):
                raise CLIError("There is no sku available for this location.")
    return table_result


def table_transform_output_list_servers_single_server(result):
    table_result = []
    for key in result:
        new_entry = OrderedDict()
        new_entry['Name'] = key['name']
        new_entry['Resource Group'] = key['resourceGroup']
        new_entry['Location'] = key['location']
        new_entry['Version'] = key['version']
        new_entry['Storage Size(GiB)'] = int(key['storageProfile']['storageMb']) / 1024.0
        new_entry['Tier'] = key['sku']['tier']
        new_entry['SKU'] = key['sku']['name']
        table_result.append(new_entry)
    return table_result


def table_transform_output_list_skus_single_server(result):
    table_result = []
    if len(result) > 1:
        for tiers in result:
            tier_name = tiers["id"]
            try:
                keys = tiers["serviceLevelObjectives"]
                for key in keys:
                    new_entry = OrderedDict()
                    new_entry['SKU'] = key['id']
                    new_entry['Tier'] = tier_name
                    new_entry['vCore'] = key['vCore']
                    new_entry['Generation'] = key['hardwareGeneration']
                    table_result.append(new_entry)
            except (KeyError, IndexError):
                raise CLIError("There is no sku available for this location.")
    return table_result
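A minimal sketch of calling table_transform_output_list_servers with a hand-built result list. The server values and the id path below are invented; the dict shape simply mirrors the keys the transformer reads:

sample = [{
    "id": "/subscriptions/0000/resourceGroups/rg1/providers/Microsoft.DBforPostgreSQL/flexibleServers/demo",  # invented
    "name": "demo",
    "resourceGroup": "rg1",
    "location": "westus",
    "version": "13",
    "storageProfile": {"storageMb": 32768},
    "sku": {"tier": "Burstable", "name": "Standard_B1ms"},
    "state": "Ready",
    "haState": "NotEnabled",
    "availabilityZone": "1",
}]

for row in table_transform_output_list_servers(sample):
    print(dict(row))
    # {'Name': 'demo', 'Resource Group': 'rg1', ..., 'Storage Size(GiB)': 32.0, ...}

Because the id contains 'flexibleServers', the State, HA State, and Availability zone columns are included as well.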


init_db.py

Source: init_db.py (GitHub)


# To run, use:
# python3 manage.py shell < init_db.py
from api.models import AvailableDaysTimes

# Seed every day/time slot in order. Days are Portuguese weekday
# abbreviations (seg=Mon ... dom=Sun); times are manha (morning),
# tarde (afternoon), noite (evening).
order = 0
new_entry = AvailableDaysTimes(day='seg', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='seg', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='seg', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='ter', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='ter', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='ter', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qua', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qua', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qua', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qui', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qui', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='qui', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sex', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sex', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sex', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sab', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sab', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='sab', time='noite', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='dom', time='manha', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='dom', time='tarde', order=order)
new_entry.save()
order += 1
new_entry = AvailableDaysTimes(day='dom', time='noite', order=order)
new_entry.save()
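The script above works, but the same 21 rows can be seeded with a loop. A compact sketch using itertools.product, producing the same order values 0..20 against the same model:

from itertools import product

from api.models import AvailableDaysTimes

DAYS = ('seg', 'ter', 'qua', 'qui', 'sex', 'sab', 'dom')
TIMES = ('manha', 'tarde', 'noite')

# product() iterates times fastest, matching the original ordering:
# seg/manha=0, seg/tarde=1, seg/noite=2, ter/manha=3, ...
for order, (day, time) in enumerate(product(DAYS, TIMES)):
    AvailableDaysTimes(day=day, time=time, order=order).save()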


