How to use the _get_information method in LISA

Best Python code snippets using lisa_python

tab_scrapper.py

Source:tab_scrapper.py Github

copy

Full Screen

...31 except NameError:32 return False33 else:34 return issubclass(dynamic_tab_scrapper, TabScrapper)35 def _get_information(self):36 """Abstract method that all the subclasses have to override."""37 pass38 def get_information(self):39 """Returns the information of the tab using the implementation of the concrete classes."""40 try:41 return self._get_information()42 except(AttributeError, KeyError) as e:43 self._logger.error(f"Error scrapping the tab information: {e}")44class KeyValueTabScrapper(TabScrapper):45 """Class that knows how to handle tabs with a structure of <tr><td class="key"></td><td class="value"></td></tr>."""46 def _get_key(self, key_column):47 """Given the key column it takes and returns the text of the column."""48 return key_column.text.replace(u'\xa0', u' ')49 def _get_value(self, value_column):50 """Given the value column it takes and returns the text of the column."""51 pass52 def _get_information(self):53 """54 Iterates over all the rows in the table, builds a dict with the keys and values columns and, returns the dict.55 """56 info_dict = {}57 table = self._tab.find('table', class_='details')58 for row in table.find_all('tr'):59 row_key, row_value = row.find(class_='key'), row.find(class_='value')60 key_with_emoji, value = self._get_key(row_key), self._get_value(row_value)61 split_key = key_with_emoji.split(" ", 1)62 if len(split_key):63 key = split_key[-1]64 info_dict.update({key: value})65 return info_dict66class ScoresTabScrapper(KeyValueTabScrapper):67 """Class that knows how to scrap the data from the Scores tab."""68 # Rank regex69 rank_re = re.compile(r'.*\(Rank #(\d+)\).*')70 def __init__(self, soup, **kwargs):71 super().__init__(soup, **kwargs)72 self._tab = self._tab_scroller.find("div", class_="tab tab-ranking show")73 def _get_value(self, value_column):74 """Override the super class method. 
Given the value column it takes and returns the text of the value."""75 return value_column.div.div.text, self.get_bar_value(value_column)76 def get_rank(self):77 """Given the city details soup, knows how to take the rank number."""78 details = self._tab.find("table", class_="details")79 rank, = self.rank_re.match(details.find("td", class_="value").get_text()).groups()80 return rank81 def get_bar_value(self, value_column):82 """Given the city details soup, knows how to take the percentage that the bar is filled."""83 style = value_column.div.find("div", attrs={'class': 'filling'}).attrs.get("style")84 if style and (width := style.split(':', 1)) and len(width):85 return float(width[-1].strip('%')) / 100 if width[-1] else None86class DigitalNomadGuideTabScrapper(KeyValueTabScrapper):87 """Class that knows how to scrap the data from the Digital Nomad Guide tab."""88 def __init__(self, soup, **kwargs):89 super().__init__(soup, **kwargs)90 self._tab = self._tab_scroller.find("div", class_="tab tab-digital-nomad-guide")91 def _get_value(self, value_column):92 url = a.attrs.get('href') if (a := value_column.find('a')) else None93 return value_column.text, None, url94 def get_continent(self):95 """Given the city details soup, knows how to take the rank number."""96 return self._tab.find("table", class_="details").find("td", class_="value").get_text()97class CostOfLivingTabScrapper(KeyValueTabScrapper):98 """Class that knows how to scrap the data from the Cost of Living tab."""99 def __init__(self, soup, **kwargs):100 super().__init__(soup, **kwargs)101 self._tab = self._tab_scroller.find("div", class_="tab editable tab-cost-of-living double-width")102 def _get_value(self, value_column):103 # The variable "a" is assigned in the if statement104 url = a.attrs.get('href') if (a := value_column.find('a')) else None105 return value_column.text, None, url106class ProsAndConsTabScrapper(TabScrapper):107 """Class that knows how to scrap the data from the Pros and Cons tab."""108 
def __init__(self, soup, **kwargs):109 super().__init__(soup, **kwargs)110 self._tab = self._tab_scroller.find("div", class_="tab tab-pros-cons")111 self._keys_dict = {0: 'pros', 1: 'cons'}112 def _get_information(self):113 """114 Iterates over both divs pros and cons, builds an array with all the pros and all the cons, and returns a dict115 with the type {pros: [...pros], cons: [...cons]}.116 """117 pros_cons = []118 pros_cons_dict = {}119 for i, div in enumerate(self._tab.find_all("div")):120 for p in div.find_all("p"):121 pro_con = p.get_text(strip=True)122 pros_cons.append(pro_con)123 pros_cons_dict.update({self._keys_dict[i]: pros_cons})124 pros_cons = []125 return pros_cons_dict126class ReviewsTabScrapper(TabScrapper):127 """Class that knows how to scrap the data from the Reviews tab."""128 def __init__(self, soup, **kwargs):129 super().__init__(soup, **kwargs)130 self._tab = self._tab_scroller.find("div", class_="tab tab-reviews")131 def _get_review(self, element):132 return element.find("div", class_="review-text").text133 def _get_published_date(self, element):134 return element.find("meta", attrs={'itemprop': 'datePublished'}).attrs.get('content')135 def _get_information(self):136 """Takes all the reviews in the tab, and returns an array with all of them."""137 reviews = self._tab.find_all("div", class_="review", attrs={'itemprop': 'review'})138 return [(self._get_review(elem), self._get_published_date(elem)) for elem in reviews]139class WeatherTabScrapper(TabScrapper):140 """Class that knows how to scrap the data from the Weather tab."""141 def __init__(self, soup, **kwargs):142 super().__init__(soup, **kwargs)143 self._tab = self._tab_scroller.find("div", class_="tab tab-weather")144 self.climate_table = self._tab.find("table", class_="climate")145 self._value_getters_by_key = {**dict.fromkeys(['Feels', 'Real'], self._get_temperature),146 **dict.fromkeys(['Humidity', 'Rain', 'Cloud', 'Air quality', 'Sun'],147 self._get_weather_indexes)}148 def 
_get_information(self):149 """Takes all the value from the weather matrix, and builds a dict with tuples for each weather attribute.150 Then, returns the dict."""151 weather_dict = {}152 table_body = self.climate_table153 rows = table_body.find_all('tr')154 months = [col.get_text() for col in rows[0].find_all('td')[1:]]155 for row in rows[1:]:156 cols = row.find_all('td')157 key = cols[0].get_text()158 value_getter = self._value_getters_by_key.get(key, self._get_remote_workers)159 weather_dict.update({key: [(months[i],) + value_getter(col) for i, col in enumerate(cols[1:])]})160 return weather_dict161 def _get_temperature(self, col):162 metric, desc = col.find("span", class_="metric"), col.find("span", class_="")163 return tuple([value.get_text(strip=True) for value in [metric, desc]])164 def _get_weather_indexes(self, col):165 if contents := col.span.contents:166 return contents[2] if len(contents) > 2 else None, contents[0]167 return None, None168 def _get_remote_workers(self, col):169 return col.span.get_text(strip=False), None170class PhotosTabScrapper(TabScrapper):171 """Class that knows how to scrap data from the Photos tab."""172 def __init__(self, soup, **kwargs):173 super().__init__(soup, **kwargs)174 self._tab = self._tab_scroller.find("div", class_="tab tab-photos")175 def _get_information(self):176 """Takes all the pictures from the tab, and returns an array with all of them."""177 return [photo.attrs["data-src"] for photo in self._tab.find_all("img", class_="lazyload")]178class CityGridTabScrapper(TabScrapper):179 """Class that knows how to handle tabs with a grid of cities.."""180 def _get_text(self, city):181 return city.find("div", class_="text").h3.a.text.replace(LATIN1_NON_BREAKING_SPACE, u' ')182 def _get_information(self):183 """Takes all the names of the cities in the grid. 
Returns an array with all the names."""184 grid = self._tab.find("div", class_="details grid show")185 cities = grid.find_all("li", attrs={'data-type': 'city'})186 return [self._get_text(city) for city in cities]187class NearTabScrapper(CityGridTabScrapper):188 """Class that knows how to scrap data from the Near tab."""189 def __init__(self, soup, **kwargs):190 super().__init__(soup, **kwargs)191 self._tab = self._tab_scroller.find("div", class_="tab tab-near")192class NextTabScrapper(CityGridTabScrapper):193 """Class that knows how to scrap data from the Next tab."""194 def __init__(self, soup, **kwargs):195 super().__init__(soup, **kwargs)196 self._tab = self._tab_scroller.find("div", class_="tab tab-next")...

Full Screen

Full Screen

parsertest.py

Source:parsertest.py Github

copy

Full Screen

...34 self.assert_ (video, "The URL {0} tested was invalid".format (url))35 video.getVideoInformation ()36 def test_youtube_parser (self):37 url = test_dict["youtube"]38 self._get_information(url)39 40 def test_pornotube_parser (self):41 url = test_dict["pornotube"]42 self._get_information(url)43 def test_redtube_parser (self):44 url = test_dict["redtube"]45 self._get_information(url)46 def test_veoh_parser (self):47 url = test_dict["veoh"]48 self._get_information(url)49 def test_veoh_portal_parser (self):50 url = test_dict["veoh_portal"]51 self._get_information(url)52 def test_youporn_parser (self):53 url = test_dict["youporn"]54 self._get_information(url)55 def test_google_video_parser (self):56 url = test_dict["google_video"]57 self._get_information(url)58 def test_metacafe_parser (self):59 url = test_dict["metacafe"]60 self._get_information(url)61 def test_pornhub_parser (self):62 url = test_dict["pornhub"]63 self._get_information(url)64 def test_tube8_parser (self):65 url = test_dict["tube8"]66 self._get_information(url)67 def test_myvideo_parser (self):68 url = test_dict["myvideo"]69 self._get_information(url)70 def test_myspacetv_parser (self):71 url = test_dict["myspacetv"]72 self._get_information(url)73 def test_guba_parser (self):74 url = test_dict["guba"]75 self._get_information(url)76 def test_dailymotion_parser (self):77 url = test_dict["dailymotion"]78 self._get_information(url)79 def test_giantbomb_parser (self):80 url = test_dict["giantbomb"]81 self._get_information(url)82 def test_screwattack_parser (self):83 url = test_dict["screwattack"]84 self._get_information(url)85 def test_gametrailers_parser (self):86 url = test_dict["gametrailers"]87 self._get_information(url)88 def test_escapistmagazine_parser (self):89 url = test_dict["escapistmagazine"]90 self._get_information(url)91 def test_yahoovideo_parser (self):92 url = test_dict["yahoovideo"]93 self._get_information(url)94 def test_padding (self):95 """Test that proper padding is done in the 
VideoItem.buildCommandList method"""96 youtube_video = parser_manager.validateURL (test_dict["youtube"])97 youtube_video.title = "Tester"98 youtube_video.setFilePaths (".")99 #youtube_video.setOutputRes (youtube_video.RES_640)100 # Testing against http://www.youtube.com/watch?v=_leYvPpmJGg specs101 command_list = youtube_video.buildCommandList (96, 384, 320, 180)102 padtop = padbottom = None103 for i, value in enumerate (command_list):104 if value == "-padtop": padtop = command_list[i+1]105 elif value == "-padbottom": padbottom = command_list[i+1]106 self.assertEqual (padtop, "30")107 self.assertEqual (padbottom, "30")...

Full Screen

Full Screen

parser.py

Source:parser.py Github

copy

Full Screen

...33 parsed = BeautifulSoup(html, 'html.parser')34 result = parsed.find(string=re.compile(r'(does\snot\sexist.)'))35 return not bool(result)36 @staticmethod37 def _get_information(result):38 """ Recupera a informação padrão do HTML. """39 if result:40 return result.find_next('td').text.strip()41 def extract_value(self, html, value):42 """ Recupera o valor do HTML. """43 result = html.find('td', string=value)44 return self._get_information(result)45 def extract_last_login(self, html, value):46 """ Recupera o valor do HTML. """47 result = html.find('td', string=value)48 return normalize_text(self._get_information(result))49 def extract_account_status(self, html):50 """ Recupera o valor do HTML. """51 result = html.find('td', string=re.compile(self.ACCOUNT_STATUS_REGEX))52 return self._get_information(result)53 @staticmethod54 def extract_deaths(html):55 """ Recupera a informação """56 text = html.find('b', string='Character Deaths')57 result = []58 if text:59 rows = text.find_all_next('tr')60 for item in rows:61 if item.text == 'Account Information':62 break63 # Recupera o 1o. TD dentro da TR64 timestamp = normalize_text(item.select_one('td:nth-of-type(1)').text.strip())65 # Recupera o 2o. TD dentro da TR66 description = normalize_text(item.select_one('td:nth-of-type(2)').text.strip())...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run LISA automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful