How to use the get_table_rows method in Gherkin-python

Best Python code snippets using gherkin-python. The examples below come from open-source projects on GitHub and show several unrelated helpers that happen to be named get_table_rows: a SQLite query wrapper, a web-scraping table parser, an EOSIO chain-state reader in a contract test, and a Selenium table reader.
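None of these helpers is an official gherkin-python API. For orientation, here is a minimal sketch of reading the rows of a Gherkin step data table with the gherkin parser package. The feature text, the helper name get_table_rows, and the exact AST keys ('dataTable', 'rows', 'cells', 'value') are assumptions based on the standard Gherkin AST, so verify them against the parser version you actually use.

# Sketch: extract a step's data table as a list of row-value lists.
# Assumes the AST layout produced by the official 'gherkin' parser
# (steps -> dataTable -> rows -> cells -> value); key names may differ by version.
from gherkin.parser import Parser

FEATURE_TEXT = """\
Feature: Users
  Scenario: Listing users
    Given the following users exist
      | name  | role  |
      | alice | admin |
      | bob   | guest |
"""

def get_table_rows(step):
    table = step.get('dataTable')
    if not table:
        return []
    return [[cell['value'] for cell in row['cells']] for row in table['rows']]

document = Parser().parse(FEATURE_TEXT)
for child in document['feature']['children']:
    scenario = child.get('scenario')
    if not scenario:
        continue
    for step in scenario['steps']:
        print(get_table_rows(step))

Each row comes back as a plain list of cell values, which keeps the helper independent of any particular test framework.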

zotero_helpers.py

Source: zotero_helpers.py (GitHub)


...
import pandas as pd
pd.options.display.max_colwidth = 40
pd.options.display.max_rows = 20
def pandas_sql(table, columns):
    return pd.DataFrame(ut.get_table_rows(cur, table, columns),
                        columns=columns)
item_df = pandas_sql('items', ('itemID', 'itemTypeID', 'libraryID', 'key')).set_index('itemID', drop=False)
tags_df = pandas_sql('tags', ('tagID', 'name', 'type', 'libraryID', 'key')).set_index('tagID', drop=False)
itemData_df = pandas_sql('itemData', ('itemID', 'fieldID', 'valueID'))
itemTag_df = pandas_sql('itemTags', ('itemID', 'tagID'))
itemDataValues_df = pandas_sql('itemDataValues', ('valueID', 'value')).set_index('valueID')
field_df = pandas_sql('fields', ('fieldID', 'fieldName', 'fieldFormatID')).set_index('fieldID')
itemData_df['value'] = itemDataValues_df['value'].loc[itemData_df['valueID'].values].values
itemData_df['fieldName'] = field_df['fieldName'].loc[itemData_df['fieldID'].values].values
titles = itemData_df[itemData_df['fieldName'] == 'title']
assert len(ut.unique(ut.map_vals(len, titles.groupby('itemID').indices).values())) == 1
# itemTag_df.groupby('itemID').count()
# Find how often each tag is used
tagid_to_count = itemTag_df.groupby('tagID').count()
tagid_to_count = tagid_to_count.rename(columns={'itemID': 'nItems'})
tagid_to_count['name'] = tags_df.loc[tagid_to_count.index]['name']
tagid_to_count = tagid_to_count.sort_values('nItems')
bad_tags = tagid_to_count[tagid_to_count['nItems'] == 1]
tagid_to_count['tag_ncharsize'] = tagid_to_count['name'].apply(len)
tagid_to_count = tagid_to_count.sort_values('tag_ncharsize')
bad_tags = tagid_to_count[tagid_to_count['tag_ncharsize'] > 25]['name'].values.tolist()
def clean_tags2():
    api_key = 'fBDBqRPwW9O3mYyNLiksBKZy'
    base_url = 'https://api.zotero.org'
    library_id = '1279414'
    library_type = 'user'
    from pyzotero import zotero
    zot = zotero.Zotero(library_id, library_type, api_key)
    for chunk in ut.ProgChunks(bad_tags, 50):
        zot.delete_tags(*chunk)
if False:
    api_key = 'fBDBqRPwW9O3mYyNLiksBKZy'
    base_url = 'https://api.zotero.org'
    user_id = '1279414'
    userOrGroupPrefix = '/users/' + user_id
    params = {'v': 3, 'key': api_key}
    items_resp = requests.get(base_url + userOrGroupPrefix + '/items', params=params)
    print(items_resp.content)
    print(items_resp)
    json_tags = []
    get_url = base_url + userOrGroupPrefix + '/tags'
    while True:
        print('get_url = %r' % (get_url,))
        tag_resp = requests.get(get_url, params=params)
        if tag_resp.status_code != 200:
            break
        json_tags.extend(tag_resp.json())
        if 'next' in tag_resp.links:
            get_url = tag_resp.links['next']['url']
        else:
            break
    version_to_tags = ut.ddict(list)
    bad_tags = []
    for tag in ut.ProgIter(json_tags, label='parsing tags'):
        # x = requests.get(tag['links']['self']['href'], params=params)
        if tag['meta']['numItems'] == 1:
            import urllib2
            try:
                bad_tags.append(urllib2.quote(tag['tag']))
            except Exception as ex:
                print('cant encode tag=%r' % (tag,))
                pass
    for chunk in ut.ProgIter(ut.ichunks(bad_tags, 50), length=len(bad_tags) / 50):
        search_url = base_url + userOrGroupPrefix + '/items?tag=' + ' || '.join(chunk)
        r = requests.get(search_url, params=params)
        matching_items = r.json()
        # assert len(matching_items) == 1
        for item in matching_items:
            version = item['version']
            version_to_tags[item['version']].append(tag['tag'])
    # DELETE MULTIPLE TAGS
    import requests
    for chunk in ut.ichunks(bad_tags['name'], 50):
        import urllib2
        encoded_chunk = []
        for t in chunk:
            try:
                encoded_chunk.append(urllib2.quote(t))
            except Exception:
                print(t)
        suffix = ' || '.join(encoded_chunk)
        delete_url = base_url + userOrGroupPrefix + '/tags?' + suffix
        print('delete_url = %r' % (delete_url,))
        resp = requests.delete(delete_url, params=params)
    bad_tags = tagid_to_count[tagid_to_count['nItems'] == 1]
    bad_tags['tagID'] = bad_tags.index
    for tagid in bad_tags:
        delete from itemTags where tagID in (select tagID from tags where type=1);
        pass
    for name in k['name'].values.tolist()
item_df['title'] = titles.set_index('itemID')['value']
for idx, item in zotero.index.items():
    sql_title = item_df.loc[item.id]['title']
    if item.title != sql_title:
        if pd.isnull(sql_title) and item.title is not None:
            print(item.__dict__)
            print(item_df.loc[item.id])
            print('item.title = %r' % (item.title,))
            print('sql_title = %r' % (sql_title,))
            assert False
duplicate_tags = [
    (name, idxs) for name, idxs in tags_df.groupby('name', sort=True).indices.items() if len(idxs) > 2
]
tagname_to_tagid = tags_df.groupby('name', sort=True).first()
new_to_oldtags = {}
# Determine which tagi to use for each name
for tagname, idxs in duplicate_tags:
    tags_subdf = tags_df.iloc[idxs]
    mapping = itemTag_df[itemTag_df['tagID'].isin(tags_subdf['tagID'])]
    tag_hist = mapping.groupby('tagID').count()
    best_tagid = tag_hist['itemID'].idxmax()
    new_to_oldtags[best_tagid] = set(tag_hist['itemID'].values) - {best_tagid}
    tagname_to_tagid.loc[tagname] = tags_df.loc[best_tagid]
    # for col in tagname_to_tagid.columns:
    #     tagname_to_tagid.loc[tagname][col] = tags_df.loc[best_tagid][col]
# tags_df.loc[best_tagid]
if False:
    # Update tagIds
    for newid, oldids in new_to_oldtags.items():
        for oldid in oldids:
            # cur.execute('SELECT itemID, tagID FROM itemTags WHERE tagID=?', (oldid,))
            import sqlite3
            try:
                cmd = 'UPDATE itemTags SET tagID=? WHERE tagID=?'
                args = (newid, oldid)
                print('(%s) args = %r' % (cmd, args,))
                cur.execute(cmd, args)
                print(cur.fetchall())
            except sqlite3.IntegrityError:
                print('error')
                pass
# tags_df.groupby('name', sort=True)
# itemTag_df.groupby('itemID')
# duptags = tags_df.iloc[tags_df.groupby('name', sort=True).indices['animals']]
# duptags['tagID']
# flags = itemTag_df['tagID'].isin(duptags['tagID'])
# dup_rel = itemTag_df[flags]
# item_df['title'].loc[dup_rel['itemID']].values
# tags_df.iloc[tags_df.groupby('name', sort=True).indices['animals']]
# tags_df[tags_df['type'] == 1]
# tags_df[tags_df['type'] == 0]
# tags_df['libraryID'].unique()
# tags_df['type'].unique()
'''
SELECT
SELECT FROM itemTags WHERE name in (animals)
'''
item_tag_pairs = ut.get_table_rows(cur, 'itemTags', ('itemID', 'tagID'))
# Group tags by item
itemid_to_tagids = ut.group_pairs(item_tag_pairs)
# Group items by tags
tagid_to_itemids = ut.group_pairs(map(tuple, map(reversed, item_tag_pairs)))
# mapping from tagid to name
tagid_to_name = dict(ut.get_table_rows(cur, 'tags', ('tagID', 'name')))
tagid_freq = list(ut.sort_dict(ut.map_vals(len, tagid_to_itemids), 'vals').items())
ut.sort_dict(ut.map_vals(sum, ut.group_pairs([(freq, tagid_to_name.get(tagid, tagid)) for tagid, freq in tagid_freq])), 'vals')
tagname_freq = ut.map_keys(lambda k: tagid_to_name.get(k, k), tagid_freq)
def get_item_resource():
    """
    from zotero_helpers import *
    """
    #item_list = zotero.search('Distinctive Image Features from Scale-Invariant Keypoints')
    #item_list = zotero.search('lowe_distinctive_2004')
    zotero_fpath = get_zotero_path()
    from os.path import join
    # FIND THE BIBTEX ITEMID
    import sqlite3
    bibsql = join(zotero_fpath, 'betterbibtex.sqlite')
    con = sqlite3.connect(bibsql)
    cur = con.cursor()
    # ut.util_sqlite.get_tablenames(cur)
    #ut.util_sqlite.print_database_structure(cur)
    itemID = ut.util_sqlite.get_table_rows(cur, 'keys', 'itemID', where='citekey=?', params='lowe_distinctive_2004')
    con.close()
    ###############
    zotero = get_libzotero()
    item = zotero.index[itemID]
    cur = zotero.cur  # NOQA
    zotero.index[1434].title
    # ENTIRE DATABASE INFO
    ut.print_database_structure(cur)
    # FIND WHERE ATTACHMENT EXITS
    for tablename in ut.get_tablenames(cur):
        try:
            x = ut.get_table_csv(cur, tablename).find('ijcv04.pdf')
        except Exception as ex:
            continue
        if x != -1:
            print(tablename)
            print(x)
    tablename = 'itemDataValues'
    print(ut.truncate_str(ut.get_table_csv(cur, tablename), maxlen=5000))
    tablename = 'itemDataValues'
    column_list = ut.get_table_columns(cur, tablename)
    import six
    for column in column_list:
        for rowx, row in enumerate(column):
            if isinstance(row, six.string_types):
                if row.find('ijcv04.pdf') > -1:
                    print(rowx)
                    print(row)
    valueID = column_list[0][3003]
    value = column_list[1][3003]
    ut.util_sqlite.get_table_rows(cur, 'itemData', None, where='valueID=?', params=valueID, unpack=False)
    ###
    #ut.rrrr()
    tablename = 'itemAttachments'
    colnames = tuple(ut.get_table_columnname_list(cur, tablename))
    print(ut.get_table_csv(cur, tablename, ['path']))
    _row_list = ut.get_table_rows(cur, tablename, 'itemID', unpack=True)
    ut.get_table_rows(cur, tablename, colnames, unpack=False)
    ut.get_table_num_rows(cur, tablename)
    itemID = ut.util_sqlite.get_table_rows(cur, tablename, colnames, where='itemID=?', params=itemID, unpack=False)
def test_libzoter():
    zotero = get_libzotero()
    item_list = zotero.search('')
    for item in item_list:
        print(item.title)
        pass
    if False:
        #set(ut.flatten([dir(x) for x in item_list]))
        item_list = zotero.search('Combining Face with Face-Part Detectors under Gaussian Assumption')
        [x.simple_format() for x in item_list]
        item_list = zotero.search('Lowe')
    if False:
        import mozrepl
        repl = mozrepl.Mozrepl(4242, u'localhost')  # NOQA
        temp_fpath = 'foo.txt'
        repl.connect(4242, u'localhost')
        r"""
        http://www.curiousjason.com/zoterotobibtex.html
        https://github.com/bard/mozrepl/wiki
        "C:\Program Files (x86)\Mozilla Firefox\firefox.exe" -profile "C:\Users\joncrall\AppData\Roaming\Mozilla\Firefox\Profiles\7kadig32.default" -repl 4242
        telnet localhost 4242
        """
        execute_string = unicode(ut.codeblock(
            r'''
            filename = '%s';
            var file = Components.classes["@mozilla.org/file/local;1"].createInstance(Components.interfaces.nsILocalFile);
            file.initWithPath(filename);
            var zotero = Components.classes['@zotero.org/Zotero;1'].getService(Components.interfaces.nsISupports).wrappedJSObject;
            var translatorObj = new Zotero.Translate('export');
            translatorObj.setLocation(file);
            translatorObj.setTranslator('9cb70025-a888-4a29-a210-93ec52da40d4');
            translatorObj.translate();
            ''') % (temp_fpath))
        print(execute_string)
        repl.execute(execute_string)
def test_zotero_sql():
    r"""
    "C:\Program Files (x86)\Mozilla Firefox\firefox.exe"
    "C:\Program Files (x86)\Mozilla Firefox\firefox.exe" -profile "C:\Users\joncrall\AppData\Roaming\Mozilla\Firefox\Profiles\7kadig32.default" -repl 4242
    References:
        http://www.cogsci.nl/blog/tutorials/97-writing-a-command-line-zotero-client-in-9-lines-of-code
        https://forums.zotero.org/discussion/2919/command-line-export-to-bib-file/
        http://www.curiousjason.com/zoterotobibtex.html
        https://addons.mozilla.org/en-US/firefox/addon/mozrepl/
        # bibtex plugin
        https://github.com/ZotPlus/zotero-better-bibtex
        https://groups.google.com/forum/#!forum/zotero-dev
    Ignore:
        C:\Users\joncrall\AppData\Roaming\Zotero\Zotero\Profiles\xrmkwlkz.default\zotero\translators
    """
    cur = zotero.cur  # NOQA
    #ut.rrrr()
    # ENTIRE DATABASE INFO
    ut.print_database_structure(cur)
    tablename_list = ut.get_tablenames(cur)
    colinfos_list = [ut.get_table_columninfo_list(cur, tablename) for tablename in tablename_list]  # NOQA
    numrows_list = [ut.get_table_num_rows(cur, tablename) for tablename in tablename_list]  # NOQA
    tablename = 'items'
    colnames = ('itemID',)  # NOQA
    colinfo_list = ut.get_table_columninfo_list(cur, tablename)  # NOQA
    itemtype_id_list = ut.get_table_rows(cur, 'items', ('itemTypeID',))
    ut.get_table_columninfo_list(cur, 'itemTypeFields')
    ut.get_table_rows(cur, 'itemTypeFields', ('fieldID',), where='itemTypeID=?', params=itemtype_ids)  # NOQA
    ut.get_table_rows(cur, 'itemTypeFields', ('orderIndex',), where='itemTypeID=?', params=itemtype_ids)  # NOQA
    ut.get_table_rows(cur, 'itemTypeFields', ('',), where='itemTypeID=?', params=itemtype_ids)  # NOQA
    itemData  # NOQA
    # Item Table INFO
    ut.get_table_columninfo_list(cur, 'tags')
    ut.get_table_columninfo_list(cur, 'items')
    ut.get_table_columninfo_list(cur, 'itemTypeFields')
    ut.get_table_columninfo_list(cur, 'itemData')
    ut.get_table_columninfo_list(cur, 'itemDataValues')
    ut.get_table_columninfo_list(cur, 'fields')
    ut.get_table_columninfo_list(cur, 'fieldsCombined')
    ut.get_table_rows(cur, 'fields', ('fieldName',))
    # The ID of each item in the database
    itemid_list = ut.get_table_rows(cur, 'items', ('itemID',))
    # The type of each item
    itemtype_id_list = ut.get_list_column(ut.get_table_rows(cur, 'items', ('itemTypeID',), where='itemID=?', params=itemid_list), 0)
    # The different types of items
    itemtype_ids = list(set(itemtype_id_list))
    # The fields of each item type
    fieldids_list_ = ut.get_table_rows(cur, 'itemTypeFields', ('fieldID',), where='itemTypeID=?', params=itemtype_ids)
    orderids_list_ = ut.get_table_rows(cur, 'itemTypeFields', ('orderIndex',), where='itemTypeID=?', params=itemtype_ids)
    fieldids_list = [ut.sortedby(f, o) for f, o in zip(fieldids_list_, orderids_list_)]
    itemtypeid2_fields = dict(zip(itemtype_ids, fieldids_list))
    itemid_fieldids_list = [[(itemID[0], fieldID[0]) for fieldID in itemtypeid2_fields[itemTypeID]] for itemID, itemTypeID in list(zip(itemid_list, itemtype_id_list))[0:7]]
    flat_list, cumsum_list = ut.invertible_flatten2(itemid_fieldids_list)
    # Get field values
    flat_valueID_list = ut.get_table_rows(cur, 'itemData', ('valueID',), where='itemID=? and fieldID=?', params=flat_list)
    valueIDs_list = ut.unflatten2(flat_valueID_list, cumsum_list)
    filtered_itemid_fieldids_list = [[if_ for if_, v in zip(ifs, vs) if len(v) > 0] for ifs, vs in zip(itemid_fieldids_list, valueIDs_list)]
    filtered_flat_list, filtered_cumsum_list = ut.invertible_flatten2(filtered_itemid_fieldids_list)
    # Get field values
    filt_flat_valueID_list = ut.get_table_rows(cur, 'itemData', ('valueID',), where='itemID=? and fieldID=?', params=filtered_flat_list)
    filt_flat_valueID_list_ = ut.get_list_column(filt_flat_valueID_list, 0)
    filt_flat_fieldname_list = ut.get_table_rows(cur, 'fields', ('fieldName',), where='fieldID=?', params=ut.get_list_column(filtered_flat_list, [1]))
    filt_flat_value_list = ut.get_table_rows(cur, 'itemDataValues', ('value',), where='valueID=?', params=filt_flat_valueID_list_)  # NOQA
    #
    filt_fieldname_list = ut.unflatten2(filt_flat_fieldname_list, filtered_cumsum_list)  # NOQA
    filt_valueIDs_list = ut.unflatten2(filt_flat_valueID_list, filtered_cumsum_list)  # NOQA
    ut.get_table_rows(cur, 'itemTypeFields', ('fieldID', 'orderIndex'), where='itemTypeID=?', params=itemtype_ids)
    all_values = ut.get_list_column(ut.get_table_rows(cur, 'itemDataValues', ('value',)), 0)
    import re
    import six
    for value in all_values:
        if isinstance(value, six.string_types) and re.search('CVPR', value):
            print(value)
    #key_list = ut.get_table_rows(cur, 'items', 'key')
...
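The snippet above leans on utool's ut.get_table_rows(cur, table, columns, ...) to pull SQLite rows straight into pandas. utool's real implementation is not shown on this page; the sketch below only mirrors the call pattern used above (table name, column tuple, optional where/params/unpack) so the pandas_sql wrapper is easier to follow. Treat it as an assumption-laden stand-in rather than utool's code.

# Sketch of a SELECT helper with the same call pattern as ut.get_table_rows;
# not utool's actual implementation.
import sqlite3

def get_table_rows(cur, tablename, colnames, where=None, params=None, unpack=False):
    # colnames may be a single column name or a tuple of column names
    if isinstance(colnames, str):
        colnames = (colnames,)
    cols = '*' if colnames is None else ', '.join(colnames)
    sql = 'SELECT {} FROM {}'.format(cols, tablename)
    if where is not None:
        sql += ' WHERE ' + where
    if params is None:
        cur.execute(sql)
    elif isinstance(params, (list, tuple)):
        cur.execute(sql, tuple(params))
    else:
        cur.execute(sql, (params,))
    rows = cur.fetchall()
    if unpack and colnames is not None and len(colnames) == 1:
        rows = [r[0] for r in rows]  # bare values instead of 1-tuples
    return rows

# Quick check against an in-memory database
con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('CREATE TABLE tags (tagID INTEGER, name TEXT)')
cur.executemany('INSERT INTO tags VALUES (?, ?)', [(1, 'cv'), (2, 'nlp')])
print(get_table_rows(cur, 'tags', ('tagID', 'name')))                                # [(1, 'cv'), (2, 'nlp')]
print(get_table_rows(cur, 'tags', 'name', where='tagID=?', params=1, unpack=True))   # ['cv']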


get_tables.py

Source: get_tables.py (GitHub)


# get_real_tables.py
#
# Contains a method, get_table_rows(), to obtain any table from the main property record
# page.
#
# May eventually be expanded to parse tables for individual buildings.
import requests
from bs4 import BeautifulSoup
import helpers
import re
def get_valuation_list(id):
    return get_table_rows(7, id)
def get_sae_list(id):
    return get_table_rows(8, id)
def get_transaction_list(id):
    return get_table_rows(10, id)
def get_permit_list(id):
    return get_table_rows(12, id)
def get_building_list(id):
    return get_table_rows(13, id)
# get_table_rows()
#
# Gets a list of rows in a table, across multiple pages if needed. Returns a dictionary.
#
# table_number: 7 for valuation record, 10 for transaction record
def get_table_rows(table_number, id):
    # We need a session to go between pages properly. Otherwise it goes like: 1, 2, 2, 3, 2, 3, 2, ... forever
    sesh = requests.session()
    # get the first page
    req = sesh.get("https://docs.oklahomacounty.org/AssessorWP5/AN-R.asp?PROPERTYID=" + str(id))
    data = req.text
    the_soup = BeautifulSoup(data, features="lxml")
    rows = the_soup.find_all('table')[table_number].tbody.find_all('tr')
    done = False
    valuation_list = []
    while done == False:  # Iterate over pages
        for r in rows:
            if r.find_all('p'):  # Get this page
                if table_number == 7:  # Valuation History
                    cur_dict = get_valuation_record(r)
...
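The wrappers above just fix the table index on the assessor page (7 for valuation history, 10 for transactions, and so on) and let get_table_rows(table_number, id) handle the fetching and pagination. A hypothetical call looks like this; the property ID is made up for illustration.

# Hypothetical usage of the wrappers defined above; 123456 is not a real property ID.
property_id = 123456
valuations = get_valuation_list(property_id)       # table index 7: valuation history
transactions = get_transaction_list(property_id)   # table index 10: transaction record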


test.py

Source: test.py (GitHub)


...
r = self.chain.push_action('hello', 'create', create)
print_console(r)
logger.info('+++++++create elapsed: %s', r['elapsed'])
self.chain.produce_block()
r = self.chain.get_table_rows(True, 'hello', 'EOS', 'stat', "", "")
logger.info(r)
assert r['rows'][0]['issuer'] == 'hello'
assert r['rows'][0]['max_supply'] == '100.0000 EOS'
assert r['rows'][0]['supply'] == '0.0000 EOS'
try:
    r = self.chain.push_action('hello', 'create', create)
except Exception as e:
    error_msg = e.args[0]['action_traces'][0]['except']['stack'][0]['data']['s']
    assert error_msg == 'token with symbol already exists'
    # logger.info(json.dumps(e.args[0], indent=' '))
#test issue
issue = {'to': 'hello', 'quantity': '1.0000 EOS', 'memo': 'issue to alice'}
r = self.chain.push_action('hello', 'issue', issue)
logger.info('+++++++issue elapsed: %s', r['elapsed'])
self.chain.produce_block()
r = self.chain.get_table_rows(True, 'hello', 'EOS', 'stat', "", "")
logger.info(r)
assert r['rows'][0]['issuer'] == 'hello'
assert r['rows'][0]['max_supply'] == '100.0000 EOS'
assert r['rows'][0]['supply'] == '1.0000 EOS'
r = self.chain.get_table_rows(True, 'hello', 'hello', 'accounts', "", "")
logger.info(r)
assert r['rows'][0]['balance'] == '1.0000 EOS'
try:
    issue = {'to': 'eosio', 'quantity': '1.0000 EOS', 'memo': 'issue to alice'}
    self.chain.push_action('hello', 'issue', issue)
except Exception as e:
    error_msg = e.args[0]['action_traces'][0]['except']['stack'][0]['data']['s']
    assert error_msg == 'tokens can only be issued to issuer account'
#test transfer
transfer = {'from': 'hello', 'to': 'alice', 'quantity': '1.0000 EOS', 'memo': 'transfer from alice'}
r = self.chain.push_action('hello', 'transfer', transfer)
logger.info('+++++++transfer elapsed: %s', r['elapsed'])
self.chain.produce_block()
r = self.chain.get_table_rows(True, 'hello', 'hello', 'accounts', "", "")
logger.info(r)
assert r['rows'][0]['balance'] == '0.0000 EOS'
r = self.chain.get_table_rows(True, 'hello', 'alice', 'accounts', "", "")
logger.info(r)
assert r['rows'][0]['balance'] == '1.0000 EOS'
# transfer back
transfer = {'from': 'alice', 'to': 'hello', 'quantity': '1.0000 EOS', 'memo': 'transfer back'}
r = self.chain.push_action('hello', 'transfer', transfer, {'alice': 'active'})
logger.info('+++++++transfer elapsed: %s', r['elapsed'])
self.chain.produce_block()
#quantity chain.Asset, memo
retire = {'quantity': '1.0000 EOS', 'memo': 'retire 1.0000 EOS'}
r = self.chain.push_action('hello', 'retire', retire)
logger.info('+++++++retire elapsed: %s', r['elapsed'])
r = self.chain.get_table_rows(True, 'hello', 'hello', 'accounts', "", "")
assert r['rows'][0]['balance'] == '0.0000 EOS'
r = self.chain.get_table_rows(True, 'hello', 'EOS', 'stat', "", "")
logger.info(r)
assert r['rows'][0]['supply'] == '0.0000 EOS'
r = self.chain.get_table_rows(True, 'hello', 'helloworld11', 'accounts', "", "")
assert len(r['rows']) == 0
#owner chain.Name, symbol chain.Symbol, ram_payer chain.Name
#test open
open_action = {'owner': 'helloworld11', 'symbol': '4,EOS', 'ram_payer': 'hello'}
r = self.chain.push_action('hello', 'open', open_action)
logger.info('+++++++open elapsed: %s', r['elapsed'])
r = self.chain.get_table_rows(True, 'hello', 'helloworld11', 'accounts', "", "")
assert r['rows'][0]['balance'] == '0.0000 EOS'
#test close
close_action = {'owner': 'helloworld11', 'symbol': '4,EOS'}
r = self.chain.push_action('hello', 'close', close_action, {'helloworld11': 'active'})
logger.info('+++++++close elapsed: %s', r['elapsed'])
self.chain.produce_block()
r = self.chain.get_table_rows(True, 'hello', 'helloworld11', 'accounts', "", "")
...
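In this chain test the positional arguments of chain.get_table_rows appear to follow the usual EOSIO convention of (json, code, scope, table, lower_bound, upper_bound), with the contract account as code and the token symbol or account name as scope. The repeated balance checks could be wrapped in a small helper like the sketch below; get_balance is a name invented here for illustration, not part of the test framework.

# Sketch of a helper around the tester's get_table_rows call shown above.
def get_balance(chain, contract, account, table='accounts'):
    r = chain.get_table_rows(True, contract, account, table, "", "")
    rows = r['rows']
    return rows[0]['balance'] if rows else None

# e.g. inside the test above:
# assert get_balance(self.chain, 'hello', 'alice') == '1.0000 EOS'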


test_eclectic.py

Source: test_eclectic.py (GitHub)


...
    driver.get(test_url)
    driver.maximize_window()
def teardown_module():
    driver.quit()
def get_table_rows(filter_text, sort_field):
    """
    This method will fetch all the rows from the table and return them as a list of dictionaries.
    By doing this we will be able to access the column data using a the keys which are not impacted by any changes in the order/index of the columns.
    """
    filter = driver.find_element(By.ID, 'filter-input')
    filter.clear()
    filter.send_keys(filter_text)
    dropdown = Select(driver.find_element(By.ID, 'sort-select'))
    dropdown.select_by_value(sort_field)
    table_elements = driver.find_element(By.XPATH, '//*[@id="app"]/div[3]/div[2]').find_elements(By.CLASS_NAME, 'table-row')
    table_data = []

    for table_row in table_elements:
        row = {}
        for col in table_row.find_elements(By.CLASS_NAME, 'table-data'):
            col_name = col.get_attribute("class").split(' ')[1].split('-')[1]
            row[col_name] = col.text.lower()
        table_data.append(row)
    return table_data
# Verify if the page loaded successfully
def test_01_check_page_load():
    h1 = driver.find_element(By.TAG_NAME, 'h1')
    assert h1.text == "Cyber attack completely fake statistics", "Page din't load successfully"
# Verify if table is present
def test_02_check_table_presence():
    global full_table_data
    full_table_data = get_table_rows('', 'name')  # This is the full table data. This will be used as reference for the validations done in all the succeeding tests.
    assert full_table_data is not None, "Table not present"
# Verifying that the data is sorted by NAME as expected
def test_03_filter_empty_sort_name():
    table = get_table_rows('', 'name')
    # Making sure that all the records in the full table are pulled in the above call since we are not filtering by anything
    assert len(table) == len(full_table_data), "Row count mismatch"
    table_names = [x['name'] for x in table]
    # Making sure that the names fetched are all in sorted order
    assert table_names == sorted(table_names), "Table not sorted accurately on NAME"
# Verifying that the data is sorted by NUMBER OF CASES as expected
def test_04_filter_empty_sort_cases():
    table = get_table_rows('', 'cases')
    table_cases = []
    for row in table:
        if 'k' in row['cases']:
            table_cases.append(float(row['cases'][:-1]) * 1000)
        elif 'm' in row['cases']:
            table_cases.append(float(row['cases'][:-1]) * 1000000)
        elif 'b' in row['cases']:
            table_cases.append(float(row['cases'][:-1]) * 1000000000)
        else:
            table_cases.append(float(row['cases']))
    assert table_cases == sorted(table_cases), "Table not sorted accurately on NUMBER OF CASES"
# Verifying that the data is sorted by AVERAGE IMPACT SCORE as expected
def test_05_filter_empty_sort_averageImpact():
    table = get_table_rows('', 'averageImpact')
    table_averageImpact = [float(x['averageImpact']) for x in table]
    assert table_averageImpact == sorted(table_averageImpact), "Table not sorted accurately on AVERAGE IMPACT SCORE"

# Verifying that the data is sorted by COMPLEXITY as expected
def test_06_filter_empty_sort_complexity():
    complexity_order = {'low': 1, 'medium': 2, 'high': 3}
    table = get_table_rows('', 'complexity')
    table_complexity = [x['complexity'] for x in table]
    # Sorting the complexity based on the custom sort order, i.e., low < medium < high and making sure its same as the order retrieved from the table
    assert table_complexity == sorted(table_complexity, key=lambda x: complexity_order[x]), "Table not sorted accurately on COMPLEXITY"
# Verifying that the filtered data is sorted by NAME as expected
# This test case can be written as 2 cases, i.e., 1 for full match filtering and another for partial match filtering if need be.
def test_07_filter_valid_text_sort_name():
    filter_txt = 'hi'
    table = get_table_rows(filter_txt, 'name')
    # Comparing the number of records returned by filtering in the UI to the number of records present in the refernce table having the filter text in the name or complexity
    assert len(table) == len([row for row in full_table_data if filter_txt in row['name'] or filter_txt in row['complexity']]), "Row count mismatch"
    table_names = [x['name'] for x in table]
    # Validating that the names are sorted as expected in the filtered records
    assert table_names == sorted(table_names), "Table not sorted accurately on NAME"
# Verifying that the filtering by numeric fields dont fetch any records
# Writing this test assuming this is a requirement, that the filters should work only on the text field.
# Another assumption here is that there are no numbers present in the text fields. This may not work as expected in such a scenario.
def test_08_filter_numbers_sort_name():
    filter_txt = '95'
    table = get_table_rows(filter_txt, 'name')
    assert len(table) == 0, "Filter not working as expected with number"
# Verifying that when we filter with a text that is not present in the table, then no records are returned.
def test_09_filter_invalid_text_sort_name():
    filter_txt = 'zzzzzzzzzzzzzzzzzzzzzzz'
    table = get_table_rows(filter_txt, 'name')
    assert len(table) == 0, "Filter not working as expected when using a text that is not present in the table"
# Verifying that the filter is case insensitive
def test_10_filter_case_insensitive_text_sort_averageImpact():
    table_upper = get_table_rows('m', 'name')
    table_lower = get_table_rows('M', 'name')
...
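Because this get_table_rows returns the table as a list of dictionaries keyed by column name, each sorting test reduces to the same pattern: pull one column out and compare it to its sorted copy. That pattern could be factored into a helper such as the sketch below; assert_sorted_by is an invented name, not part of the test file.

# Sketch: the repeated "is this column sorted?" assertion from the tests above,
# factored into a helper that works on the list-of-dicts returned by get_table_rows().
def assert_sorted_by(table_rows, column, key=None):
    values = [row[column] for row in table_rows]
    assert values == sorted(values, key=key), \
        "Table not sorted accurately on %s" % column.upper()

# e.g. assert_sorted_by(get_table_rows('', 'name'), 'name')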


