How to use domain_name method in localstack

Best Python code snippet using localstack_python

dnspod.py

Source:dnspod.py Github

copy

Full Screen

# coding: utf-8
"""DNSPod DNS provider for ACME dns-01 challenges.

Creates and removes ``_acme-challenge`` TXT records through the DNSPod
HTTP API (https://dnsapi.cn/).
"""
try:
    import urllib.parse as urlparse  # Python 3
except ImportError:  # was a bare `except:`; only the import can fail here
    import urlparse  # Python 2

import requests

from . import common


class DNSPodDns(common.BaseDns):
    """DNSPod implementation of the ``BaseDns`` provider interface."""

    dns_provider_name = "dnspod"

    def __init__(self, DNSPOD_ID, DNSPOD_API_KEY, DNSPOD_API_BASE_URL="https://dnsapi.cn/"):
        self.DNSPOD_ID = DNSPOD_ID
        self.DNSPOD_API_KEY = DNSPOD_API_KEY
        self.HTTP_TIMEOUT = 65  # seconds
        # DNSPod "login_token" format is "<id>,<api key>".
        self.DNSPOD_LOGIN = "{0},{1}".format(self.DNSPOD_ID, self.DNSPOD_API_KEY)
        # Normalise the base URL to end in "/" so urljoin() appends the API
        # method name instead of replacing the last path segment.  (The
        # original assigned the attribute twice and indexed [-1], which
        # raised IndexError on an empty string.)
        if DNSPOD_API_BASE_URL.endswith("/"):
            self.DNSPOD_API_BASE_URL = DNSPOD_API_BASE_URL
        else:
            self.DNSPOD_API_BASE_URL = DNSPOD_API_BASE_URL + "/"
        super(DNSPodDns, self).__init__()

    def extract_zone(self, domain_name):
        """Split *domain_name* into ``(root, zone, acme_txt)``.

        ``root`` is the registrable domain to pass as DNSPod's ``domain``,
        ``zone`` is the remaining subdomain part (may be ""), and
        ``acme_txt`` is the TXT record name ("_acme-challenge[.zone]").
        """
        # Strip a leading wildcard label.  The original used
        # domain_name.lstrip("*."), but lstrip() removes a *character set*,
        # so it could also eat leading dots/asterisks beyond the "*." prefix.
        if domain_name.startswith("*."):
            domain_name = domain_name[2:]
        # Two-label Chinese public suffixes that must be kept together when
        # computing the registrable domain (e.g. "example.com.cn").
        top_domain_list = ['.ac.cn', '.ah.cn', '.bj.cn', '.com.cn', '.cq.cn', '.fj.cn', '.gd.cn',
                           '.gov.cn', '.gs.cn', '.gx.cn', '.gz.cn', '.ha.cn', '.hb.cn', '.he.cn',
                           '.hi.cn', '.hk.cn', '.hl.cn', '.hn.cn', '.jl.cn', '.js.cn', '.jx.cn',
                           '.ln.cn', '.mo.cn', '.net.cn', '.nm.cn', '.nx.cn', '.org.cn']
        old_domain_name = domain_name
        top_domain = "." + ".".join(domain_name.rsplit('.')[-2:])
        # Temporarily collapse a two-label suffix ("com.cn" -> "comcn") so
        # the generic rsplit() below treats it as a single label.
        new_top_domain = "." + top_domain.replace(".", "")
        is_two_label_suffix = top_domain in top_domain_list
        if is_two_label_suffix:
            domain_name = domain_name[:-len(top_domain)] + new_top_domain
        if domain_name.count(".") > 1:
            zone, middle, last = domain_name.rsplit(".", 2)
            acme_txt = "_acme-challenge.%s" % zone
            if is_two_label_suffix:
                last = top_domain[1:]  # restore the real suffix, e.g. "com.cn"
            root = ".".join([middle, last])
        else:
            # Already a bare registrable domain; challenge goes at the apex.
            zone = ""
            root = old_domain_name
            acme_txt = "_acme-challenge"
        return root, zone, acme_txt

    def create_dns_record(self, domain_name, domain_dns_value):
        """Create the ACME challenge TXT record via Record.Create.

        Raises ValueError when DNSPod reports a non-"1" status code so the
        caller stops talking to the ACME server.
        """
        domain_name, _, subd = self.extract_zone(domain_name)
        url = urlparse.urljoin(self.DNSPOD_API_BASE_URL, "Record.Create")
        body = {
            "record_type": "TXT",
            "domain": domain_name,
            "sub_domain": subd,
            "value": domain_dns_value,
            "record_line_id": "0",
            "format": "json",
            "login_token": self.DNSPOD_LOGIN,
        }
        # The original print(body) leaked the login token (credentials) to
        # stdout; removed for security.
        create_dnspod_dns_record_response = requests.post(
            url, data=body, timeout=self.HTTP_TIMEOUT
        ).json()
        if create_dnspod_dns_record_response["status"]["code"] != "1":
            # raise error so that we do not continue to make calls to ACME
            # server
            raise ValueError(
                "Error creating dnspod dns record: status_code={status_code} response={response}".format(
                    status_code=create_dnspod_dns_record_response["status"]["code"],
                    response=create_dnspod_dns_record_response["status"]["message"],
                )
            )

    def delete_dns_record(self, domain_name, domain_dns_value):
        """Delete every TXT challenge record for *domain_name* (Record.List
        then Record.Remove per record id)."""
        domain_name, _, subd = self.extract_zone(domain_name)
        # BUG FIX: the original called urllib.parse.urljoin here (and below),
        # but "urllib" is never imported — the module is bound to the alias
        # "urlparse" by the import shim at the top — so every delete raised
        # NameError.
        url = urlparse.urljoin(self.DNSPOD_API_BASE_URL, "Record.List")
        body = {
            "login_token": self.DNSPOD_LOGIN,
            "format": "json",
            "domain": domain_name,
            "subdomain": subd,
            "record_type": "TXT",
        }
        list_dns_response = requests.post(url, data=body, timeout=self.HTTP_TIMEOUT).json()
        for record in list_dns_response["records"]:
            urlr = urlparse.urljoin(self.DNSPOD_API_BASE_URL, "Record.Remove")
            bodyr = {
                "login_token": self.DNSPOD_LOGIN,
                "format": "json",
                "domain": domain_name,
                "record_id": record["id"],
            }
            # NOTE(review): the source snippet was truncated mid-call here;
            # completed in the obvious way — fire the Remove request.
            requests.post(urlr, data=bodyr, timeout=self.HTTP_TIMEOUT)

Full Screen

Full Screen

google_search.py

Source:google_search.py Github

copy

Full Screen

# NOTE(review): this snippet is truncated at both ends in the source page.
# The head of check_if_legit and the tail of start_predict are reconstructed
# from the visible fragment and flagged below — confirm against the full file.

def check_if_legit(domain_name):
    """Score *domain_name* against the loaded legit/illegit site lists by
    bumping the module-level counters."""
    # NOTE(review): the function header was truncated; the visible fragment
    # shows only the two membership checks below.
    global true_points, false_points
    if domain_name in legit_sites:
        true_points += 1
    if domain_name in illegit_sites:
        false_points += 1


def get_the_domain_name(complete_link):
    """Reduce *complete_link* to a bare domain, score it, and append it to
    the module-level ``domains_list`` (Google-cache/policy hosts excluded)."""
    broken_url = urlparse(complete_link)
    domain_name = '{uri.scheme}://{uri.netloc}/'.format(uri=broken_url)
    if domain_name[:5] == 'https':
        domain_name = domain_name.replace('https://www.', '')
        domain_name = domain_name.replace('https://', '')
        if domain_name[-1] == '/':
            domain_name = domain_name[:-1]
        if (domain_name != "webcache.googleusercontent.com"
                and domain_name != "policies.google.com"
                and domain_name != "support.google.com"):
            check_if_legit(domain_name)
            domains_list.append(domain_name)
        return
    if domain_name[:4] == 'http':
        domain_name = domain_name.replace('http://www.', '')
        # BUG FIX: the original replaced 'https://' here, which can never
        # occur in this branch (the https case returned above), so bare
        # 'http://host' links kept their scheme and never matched the lists.
        domain_name = domain_name.replace('http://', '')
        if domain_name[-1] == '/':
            domain_name = domain_name[:-1]
        if (domain_name != "webcache.googleusercontent.com"
                and domain_name != "policies.google.com"
                and domain_name != "support.google.com"):
            check_if_legit(domain_name)
            domains_list.append(domain_name)


def google_search_it(query):
    """Google-search *query* and score result titles and result domains into
    the module-level counters."""
    global true_points
    global false_points
    url = 'https://www.google.com/search?q=' + query
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    html = requests.get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    # (Unused locals `results`, `stories` and `complete_title` from the
    # original were dropped — they were assigned and never read.)
    for links_a in soup.find_all('a'):
        if links_a.find('span'):
            t = links_a.find('span').text
            if t and (check_similarity(t, query) == 1):
                true_points += 1
            if t and query in t:
                true_points += 1
        if links_a.has_attr('href'):
            complete_link = links_a['href']
            # Google wraps result links as "/url?q=<target>"; strip the prefix.
            if links_a['href'][:4] != 'http':
                complete_link = links_a['href'][7:]
            get_the_domain_name(complete_link)


def start_predict(query):
    """Reset the counters, load the site datasets, and begin scoring *query*."""
    global true_points, false_points
    global legit_sites, illegit_sites
    true_points = 0
    false_points = 0
    with open("news\\true_dataset.txt") as true_data:
        legit_sites = true_data.readlines()
        legit_sites = [x.strip() for x in legit_sites]
    with open("news\\fake_dataset.txt") as fake_data:
        illegit_sites = fake_data.readlines()
        illegit_sites = [x.strip() for x in illegit_sites]
    fake_words = ['fake', 'hoax', 'lie', 'lies', 'lies', 'worst', 'false',
                  'illegitimate', 'rumour', 'counterfeit', 'forged',
                  'fictitious', 'fabricated', 'fraud']
    query_list = query.split()
    sumx = 0.0
    # NOTE(review): the source snippet ends here (truncated); the remainder
    # of start_predict is not visible.

Full Screen

Full Screen

site_checker.py

Source:site_checker.py Github

copy

Full Screen

from urllib.parse import urlparse


def check_if_legit(domain_name):
    """Classify *domain_name* against the module-level legit/illegit lists.

    Returns one of the three fixed verdict strings.  (The original also set
    an unused local ``true_points = 0`` — dead code, removed.)
    """
    domain_name = domain_name.replace('http://', '')
    if domain_name in illegit_sites:
        return "Fake / Malicious"
    if domain_name in legit_sites:
        return "Authentic"
    return "Not found as a news aggregator"


def checker(complete_link):
    """Load the site datasets, normalise *complete_link* to a bare domain,
    and return its classification string."""
    global legit_sites
    global illegit_sites
    with open("news\\true_dataset.txt") as true_data:
        legit_sites = true_data.readlines()
        legit_sites = [x.strip() for x in legit_sites]
    with open("news\\fake_dataset.txt") as fake_data:
        illegit_sites = fake_data.readlines()
        illegit_sites = [x.strip() for x in fake_data_lines(illegit_sites)] if False else [x.strip() for x in illegit_sites]
    broken_url = urlparse(complete_link)
    domain_name = '{uri.scheme}://{uri.netloc}/'.format(uri=broken_url)
    # Guarantee a scheme so the branches below can strip it uniformly.
    if domain_name[:4] != 'http':
        domain_name = "http://" + domain_name
    print(domain_name)
    if domain_name[:5] == 'https':
        domain_name = domain_name.replace('https://www.', '')
        domain_name = domain_name.replace('https://', '')
        if domain_name[-1] == '/':
            domain_name = domain_name[:-1]
        outputx = check_if_legit(domain_name)
        return outputx
    if domain_name[:4] == 'http':
        domain_name = domain_name.replace('http://www.', '')
        # BUG FIX: the original replaced 'https://' here, a guaranteed no-op
        # in this branch; 'http://' is the prefix that can actually remain.
        domain_name = domain_name.replace('http://', '')
        if domain_name[-1] == '/':
            domain_name = domain_name[:-1]
        outputx = check_if_legit(domain_name)
        return outputx
    # NOTE(review): unreachable — after the "http://" prepend above, one of
    # the two branches always returns.  Kept for parity with the original.
    domain_name = domain_name.replace('www.', '')
    if domain_name[-1] == '/':
        domain_name = domain_name[:-1]
    outputx = check_if_legit(domain_name)
    return outputx


legit_sites = []
illegit_sites = []
# pp = checker("http://www.ndtv.com")
# #print(legit_sites)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful