How to use async_session method in yandex-tank

Best Python code snippet using yandex-tank

news_crawler.py

Source:news_crawler.py Github

copy

Full Screen

from odoo import models, fields, api
from gnews import GNews
from newspaper import Article, Config
from datetime import datetime, timedelta
import requests
import json
import asyncio
import time
import urllib
import newspaper
import logging
import re
from bs4 import BeautifulSoup
from requests_html import AsyncHTMLSession

_logger = logging.getLogger(__name__)


class news_crawler(models.Model):
    """Crawl Taiwanese news sites for keyword matches.

    Each matching, not-yet-seen article is stored as a record of this model
    and announced through LINE Notify.  Every site-specific crawler stops at
    the first duplicate or out-of-window article (result lists are assumed
    newest-first — TODO confirm for each site).
    """

    _name = 'news_crawler'
    _description = '根據關鍵字爬取 News'

    name = fields.Char('新聞標題')       # article title
    publisher = fields.Char('發布者')    # publisher display name
    url = fields.Char('連結')            # article URL
    date = fields.Datetime('發布時間')   # published datetime (stored value)
    keyword = fields.Char('關鍵字')      # keyword(s) that matched

    # Default env_name used to look up the LINE token in the config_token model.
    token = 'here'
    # Note: the original literal used a backslash line continuation that
    # embedded stray indentation inside the UA string; normalised to one line.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36'
    }

    def line_notify(self, token, msg):
        """Send *msg* via LINE Notify using bearer *token*; return the HTTP status code."""
        url = "https://notify-api.line.me/api/notify"
        headers = {"Authorization": "Bearer " + token}
        payload = {'message': msg}
        r = requests.post(url, headers=headers, params=payload)
        return r.status_code

    def all_keywords(self, *keywords, type):
        """Join *keywords* into one string.

        ``type='urlencode'`` quotes each word and joins with '%20'; any other
        value joins the raw words with '&'.  ``type`` shadows the builtin but
        is kept for call-site compatibility.  The original definition omitted
        ``self`` and skipped ``keywords[0]`` to compensate; declaring ``self``
        explicitly is equivalent for every ``self.all_keywords(...)`` call.
        """
        if type == 'urlencode':
            return '%20'.join(urllib.parse.quote(str(word)) for word in keywords)
        return '&'.join(str(word) for word in keywords)

    def _store_and_notify(self, title, publisher, url, keyword, token,
                          published_date, record_date, window_hours=1):
        """Deduplicate, persist and notify a single article.

        Returns True when the caller should keep iterating, False when it
        should stop: either the article is already stored (matched by URL or
        title) or it is older than the recency window.
        """
        if self.search([("url", "=", url)]) or self.search([("name", "=", title)]):
            return False
        if published_date < datetime.today() - timedelta(hours=window_hours):
            return False
        # Fix vs. original: the hard-coded 'id': 1 entry was removed from the
        # create() values — forcing the same id on every record is invalid.
        record = self.create({
            'name': title,
            'publisher': publisher,
            'url': url,
            'date': record_date,
            'keyword': keyword,
        })
        self.env.cr.commit()
        if record:
            line_token = self.env['config_token'].search(
                [('env_name', '=', token)]).line_token
            self.line_notify(line_token, title + " 〔" + keyword + "〕 " + url)
        return True

    def get_google_news(self, keyword, token=token):
        """Fetch Google News hits for *keyword* (zh-Hant / TW, last 4 hours)."""
        google_news = GNews(language='zh-Hant', country='TW', period='4h')
        news = google_news.get_news(keyword)
        config = Config()
        config.request_timeout = 10
        config.browser_user_agent = self.headers['user-agent']
        for item in news:
            # Drop the trailing " - publisher" suffix; normalise full-width spaces.
            title = item['title'].split(' - ')[0].replace('\u3000', ' ')
            # Prefer the desktop LTN URL over the mobile one.
            url = item['url'].replace('https://m.ltn', 'https://news.ltn')
            publisher = item['publisher']['title']
            published_date = datetime.strptime(
                item['published date'], "%a, %d %b %Y %H:%M:%S GMT")
            article = Article(url, config=config)
            try:
                _logger.debug('keyword: %s publisher: %s token: %s',
                              keyword, publisher, token)
                article.download()
                article.parse()
                # Keep only articles whose body really contains the keyword;
                # skip redirect ('from') and Yahoo-hosted URLs.
                if keyword in article.text and 'from' not in url and 'yahoo' not in url:
                    # Google dates are stored as parsed (no -8h shift here).
                    if not self._store_and_notify(
                            title, publisher, url, keyword, token,
                            published_date, published_date):
                        break
            except newspaper.article.ArticleException:
                continue

    async def get_udn_news(self, async_session, *keywords, token=token):
        """Crawl UDN's JSON search API.  *async_session* is unused (requests is blocking)."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        udn_url = ('https://udn.com/api/more?page=0&id=search:'
                   + keyword_urlencode + '&channelId=2&type=searchword')
        res = requests.get(url=udn_url, headers=self.headers)
        publisher = 'UDN聯合新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for item in res.json()['lists']:
            url = item['titleLink']
            title = item['title'].replace('\u3000', ' ')
            published_date = datetime.strptime(
                item['time']['date'], "%Y-%m-%d %H:%M:%S")
            if 'from' not in url:
                # Stored date shifted -8h (site timestamps appear to be UTC+8
                # local time — TODO confirm).
                if not self._store_and_notify(
                        title, publisher, url, keyword, token,
                        published_date, published_date - timedelta(hours=8)):
                    break

    async def get_apple_news(self, async_session, *keywords, token=token):
        """Scrape Apple Daily search results (8-hour recency window)."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(url='https://tw.appledaily.com/search/' + keyword_urlencode,
                           headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        publisher = '蘋果新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, stamp in enumerate(soup.select('div.timestamp')):
            title = soup.select('span.headline')[i].text
            published_date = datetime.strptime(stamp.text, "出版時間:%Y/%m/%d %H:%M")
            url = ('https://tw.appledaily.com/'
                   + soup.select('a.story-card')[i].get('href'))
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8),
                    window_hours=8):
                break

    async def get_ltn_news(self, async_session, *keywords, token=token):
        """Scrape Liberty Times search results; each hit needs a second request for its timestamp."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(url='https://search.ltn.com.tw/list?keyword=' + keyword_urlencode,
                           headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        publisher = '自由時報電子報'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for tag in soup.find_all("a", class_="tit"):
            title = tag['title'].replace('\u3000', ' ')
            url = tag['href']
            try:
                page = requests.get(url=url, headers=self.headers, timeout=10)
                page_soup = BeautifulSoup(page.text, 'html.parser')
                publish = page_soup.select('span.time')[0].text.replace(
                    '\n ', '').replace('\r', '')
                if publish == "":
                    # Some layouts leave the first span.time empty.
                    publish = page_soup.select('span.time')[1].text.replace(
                        '\n ', '').replace('\r', '')
                published_date = datetime.strptime(publish, "%Y/%m/%d %H:%M")
                if not self._store_and_notify(
                        title, publisher, url, keyword, token,
                        published_date, published_date - timedelta(hours=8)):
                    break
            except requests.exceptions.RequestException:
                continue

    async def get_setn_news(self, async_session, *keywords, token=token):
        """Scrape SETN search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://www.setn.com/search.aspx?q=' + keyword_urlencode + '&r=0',
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('div.newsimg-area-text-2')
        url_tag = soup.select("div.newsimg-area-info > a.gt ")
        dates = soup.select('div.newsimg-date')
        publisher = '三立新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, block in enumerate(titles):
            title = block.text.replace('\u3000', ' ')
            url = ('https://www.setn.com/'
                   + url_tag[i].get('href').replace('&From=Search', ''))
            published_date = datetime.strptime(dates[i].text, "%Y/%m/%d %H:%M")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_ettoday_news(self, async_session, *keywords, token=token):
        """Scrape ETtoday search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://www.ettoday.net/news_search/doSearch.php?search_term_string='
                + keyword_urlencode,
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('h2 > a')
        dates = soup.select('span.date')
        publisher = 'ETtoday新聞雲'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, link in enumerate(titles):
            title = link.text.replace('\u3000', ' ')
            url = link.get('href')
            # Timestamp text looks like "(.../%Y-%m-%d %H:%M)"; the slash split
            # plus space removal leaves e.g. "2021-12-0711:00)" — hence the
            # unusual strptime format with a trailing ')'.
            publish = dates[i].text.split('/')[1].replace(' ', '')
            published_date = datetime.strptime(publish, "%Y-%m-%d%H:%M)")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_tvbs_news(self, async_session, *keywords, token=token):
        """Scrape TVBS search results; the timestamp lives on each article page."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://news.tvbs.com.tw/news/searchresult/' + keyword_urlencode + '/news',
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        publisher = 'TVBS新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for heading in soup.select('h2'):
            title = heading.text.replace('\u3000', ' ')
            each_url = heading.find_parents("a")[0].get('href')
            page = requests.get(url=each_url, headers=self.headers)
            page_soup = BeautifulSoup(page.text, 'html.parser')
            # Raw string fixes the original's un-escaped regex literal.
            match = re.search(r"發佈時間:\d\d\d\d\/\d\d\/\d\d \d\d:\d\d",
                              page_soup.select('div.author')[0].text)
            published_date = datetime.strptime(match.group(), "發佈時間:%Y/%m/%d %H:%M")
            # Fix vs. original: dedupe/store used the search-results page URL
            # instead of the article URL, so every TVBS record shared one link.
            if not self._store_and_notify(
                    title, publisher, each_url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_china_news(self, async_session, *keywords, token=token):
        """Scrape China Times search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://www.chinatimes.com/search/' + keyword_urlencode + '?chdtv',
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('h3 > a')
        dates = soup.select('time')
        publisher = '中時新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, link in enumerate(titles):
            title = link.text.replace('\u3000', ' ')
            url = link.get('href')
            published_date = datetime.strptime(
                dates[i].get('datetime'), "%Y-%m-%d %H:%M")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_storm_news(self, async_session, *keywords, token=token):
        """Scrape Storm Media search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://www.storm.mg/site-search/result?q=' + keyword_urlencode
                + '&order=none&format=week',
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('p.card_title')
        urls = soup.select('a.card_substance')
        publish_dates = soup.select('span.info_time')
        publisher = '風傳媒'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, card in enumerate(titles):
            title = card.text.replace('\u3000', ' ')
            url = 'https://www.storm.mg' + urls[i].get('href').replace(
                '?kw=' + keyword + '&pi=0', '')
            published_date = datetime.strptime(
                publish_dates[i].text, "%Y-%m-%d %H:%M")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_ttv_news(self, async_session, *keywords, token=token):
        """Scrape TTV search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(url='https://news.ttv.com.tw/search/' + keyword_urlencode,
                           headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('div.title')
        urls = soup.select('ul > li > a.clearfix')
        publishes = soup.select('div.time')
        publisher = '台視新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, link in enumerate(urls):
            url = 'https://news.ttv.com.tw/' + link.get('href')
            # The first two div.title elements are page chrome — offset by 2
            # to line titles up with the result links (per original code).
            title = titles[i + 2].text.replace('\u3000', ' ')
            published_date = datetime.strptime(
                publishes[i].text, "%Y/%m/%d %H:%M:%S")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_ftv_news(self, async_session, *keywords, token=token):
        """Scrape FTV search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(url='https://www.ftvnews.com.tw/search/' + keyword_urlencode,
                           headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        titles = soup.select('div.title')
        urls = soup.select('ul > li > a.clearfix')
        publishes = soup.select('div.time')
        publisher = '民視新聞網'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, link in enumerate(urls):
            url = 'https://www.ftvnews.com.tw' + link.get('href')
            title = titles[i].text.replace('\u3000', ' ')
            published_date = datetime.strptime(
                publishes[i].text, "%Y/%m/%d %H:%M:%S")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def get_cna_news(self, async_session, *keywords, token=token):
        """Scrape CNA search results."""
        keyword_urlencode = self.all_keywords(*keywords, type='urlencode')
        keyword = self.all_keywords(*keywords, type='string')
        res = requests.get(
            url='https://www.cna.com.tw/search/hysearchws.aspx?q=' + keyword_urlencode,
            headers=self.headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        urls = soup.select('ul.mainList > li > a')
        titles = soup.select('div.listInfo > h2')
        dates = soup.select('div.date')
        publisher = 'CNA中央社'
        _logger.debug('keyword: %s publisher: %s token: %s', keyword, publisher, token)
        for i, link in enumerate(urls):
            url = link.get('href')
            title = titles[i].text.replace('\u3000', ' ')
            published_date = datetime.strptime(dates[i].text, "%Y/%m/%d %H:%M")
            if not self._store_and_notify(
                    title, publisher, url, keyword, token,
                    published_date, published_date - timedelta(hours=8)):
                break

    async def main(self, *keywords, token=token):
        """Gather every per-site crawler.

        The crawler coroutines never actually await anything (all HTTP goes
        through blocking ``requests``), so gather() effectively runs them
        sequentially; kept for interface compatibility.
        """
        async_session = AsyncHTMLSession()
        tasks = [
            self.get_udn_news(async_session, *keywords, token=token),
            self.get_apple_news(async_session, *keywords, token=token),
            self.get_setn_news(async_session, *keywords, token=token),
            self.get_ettoday_news(async_session, *keywords, token=token),
            self.get_tvbs_news(async_session, *keywords, token=token),
            self.get_china_news(async_session, *keywords, token=token),
            self.get_storm_news(async_session, *keywords, token=token),
            self.get_ttv_news(async_session, *keywords, token=token),
            self.get_ftv_news(async_session, *keywords, token=token),
            self.get_ltn_news(async_session, *keywords, token=token),
            self.get_cna_news(async_session, *keywords, token=token),
        ]
        return await asyncio.gather(*tasks)

    def run_main(self, *keywords, token=token):
        ''' for Odoo server call a single func'''
        # NOTE(review): the body of this method is truncated in the source
        # listing — only the signature and docstring are visible, so nothing
        # further is reconstructed here.

Full Screen

Full Screen

commands.py

Source:commands.py Github

copy

Full Screen

# NOTE(review): the first lines of this listing are truncated in the source;
# the imports of `select`, `update` and `func` (sqlalchemy) are not visible
# but are clearly required by the code below.
from utils.db_api.db import async_session
from utils.db_api.models import Users, Items
from utils.misc.hash_coded import encode


async def get_items(query: str, offset: int):
    """Return up to 20 Items ordered by name, optionally filtered by a
    case-insensitive substring of the name, starting at *offset*."""
    async with async_session() as session:
        if query:
            results = await session.execute(
                select(Items)
                .filter(func.lower(Items.name).like(f'%{query.lower()}%'))
                .order_by(Items.name)
                .offset(offset)
                .limit(20))
        else:
            results = await session.execute(
                select(Items).order_by(Items.name).offset(offset).limit(20))
        return results.scalars()


async def get_user_on_code(code: str):
    """Return the first user whose referral code equals *code*, or None."""
    async with async_session() as session:
        result = await session.execute(select(Users).where(Users.code == code))
        return result.scalars().first()


async def get_user(user_id: int):
    """Return the user with this Telegram user_id, or None."""
    async with async_session() as session:
        result = await session.execute(
            select(Users).where(Users.user_id == user_id))
        return result.scalars().first()


async def add_user(user: Users):
    """Persist a new user inside its own transaction."""
    async with async_session() as session:
        async with session.begin():
            session.add(user)


async def add_referral_user(referrer: Users, user_id: int, user_fullname: str):
    """Create an invited user and credit the referrer with 10 balance units."""
    async with async_session() as session:
        stmt = update(Users).where(Users.code == referrer.code).\
            values(balance=referrer.balance + 10).returning(Users.code)
        user_invited = Users(user_id=user_id,
                             name=user_fullname,
                             balance=0,
                             code=encode(user_id)[2:-1],
                             invited=referrer.user_id)
        # Fix vs. original: Session.add() is synchronous and returns None —
        # `await session.add(...)` raised TypeError at runtime.
        session.add(user_invited)
        await session.execute(stmt)
        await session.commit()
        return user_invited


async def update_user(text: dict, user_id: int):
    """Apply the column/value mapping *text* to the given user."""
    async with async_session() as session:
        stmt = update(Users).where(Users.user_id == user_id).values(text).\
            returning(Users.balance)
        await session.execute(stmt)
        await session.commit()


async def verify_admin(admin_id: int, admin_full_name: str):
    """Create (and return) an admin user with a large starting balance."""
    admin = Users(user_id=admin_id, name=admin_full_name,
                  balance=1_000_000, code=str(encode(admin_id))[2:-1])
    await add_user(admin)
    return admin


async def add_item(item: Items):
    """Persist a new shop item inside its own transaction."""
    async with async_session() as session:
        async with session.begin():
            session.add(item)


async def get_item(item_id: int):
    """Return the item with this id, or None."""
    async with async_session() as session:
        results = await session.execute(
            select(Items).where(Items.item_id == item_id))
        return results.scalars().first()


def to_(self):
    """Map an Items object (or the Items class) to a plain dict of its
    displayable attributes.  NOTE(review): update_item calls this on the
    *class*, yielding Column objects rather than values — confirm intent."""
    names_item = {'name': self.name,
                  'description': self.description,
                  'price': self.price,
                  'pic': self.thumb_url}
    return names_item


async def update_item(data):
    """Update each non-None field of data['old'] from *data*; return the old item."""
    old = data['old']
    for item in ['name', 'description', 'price', 'pic']:
        item_ = data[item]
        if item_ is None:
            continue
        async with async_session() as session:
            stmt = update(Items).where(Items.item_id == old.item_id).\
                values({item: item_}).returning(to_(Items)[item])
            await session.execute(stmt)
            await session.commit()
    # NOTE(review): indentation of this return is ambiguous in the mangled
    # source; placed at function level so every field gets updated.
    return old


async def delete_item(data, call):
    """Delete the item named in *data*; report via *call* when it is missing."""
    item_id = data['item_id']
    async with async_session() as session:
        results = await session.execute(
            select(Items).where(Items.item_id == int(item_id)))
        item = results.scalars().first()
        if item is None:
            await call.message.edit_text('Товар не найден! :/')
            return False
        await session.delete(item)
        await session.commit()

Full Screen

Full Screen

dal.py

Source:dal.py Github

copy

Full Screen

    # NOTE(review): the source listing is truncated above — this f-string and
    # closing paren are the tail of an engine-creation call whose start is
    # not visible (presumably create_async_engine — confirm).
    f"postgresql+asyncpg://{dbLogin}:{dbPass}@postgres_db_container/{dbName}"
)
# Async session factory; expire_on_commit=False keeps attribute values usable
# after commit.
async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)


async def has_question(question_id: int) -> bool:
    # Return True when a Question row with this question_id exists,
    # using a SELECT EXISTS(...) wrapper.
    async with async_session() as session:
        async with session.begin():
            stmt = select(
                select(Question).where(Question.question_id == question_id).exists()
            )
            result = await session.execute(stmt)
            res = result.scalar()
            return res


async def add_question(question_id: int, question_text: str, answer_text: str) -> None:
    # Insert one Question row.  session.begin() already commits on exit, so
    # the explicit commit below is presumably redundant — confirm before removing.
    async with async_session() as session:
        async with session.begin():
            session.add(
                Question(
                    question_id=question_id,
                    question_text=question_text,
                    answer_text=answer_text,
                )
            )
        await session.commit()


async def get_last_question() -> Question:
    # Fetch questions ordered by descending id (newest first).
    async with async_session() as session:
        async with session.begin():
            stmt = select(Question).order_by(-Question.id)
            result = await session.execute(stmt)
            # NOTE(review): the source listing is truncated here; the return
            # of the fetched row is not visible.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run yandex-tank automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful