How to use abbrev_list in autotest

Best Python code snippets using autotest_python

gs_svae.py

Source: gs_svae.py (GitHub)


import os, sys, json
import time, shutil, copy
import subprocess
import math
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
import comet_ml
import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
from torch.backends import cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
from run_svae import main
from utils import get_gpu_info


def gs_main():
    f = open("config.json", "r")
    config = json.load(f)
    f.close()

    flatten_config = {}
    for key in config.keys():
        flatten_config.update(config[key])

    date = config["train"]["date"]
    number_of_date = config["train"]["number_of_date"]
    parallel_strategy_on = True
    direct_parse_strategy_on = True
    max_parallel_queues = min(6, mp.cpu_count())
    minimum_memory = 1500
    gpu_id = config["train"]["gpu"]
    #device = torch.device("cuda", config["train"]["gpu"])
    #torch.cuda.manual_seed(config["train"]["seed"])

    gs_dict = {"mix":{"system":["EnKO", "FIVO", "IWAE"], "loss_type":["sumprod", "prodsum", "sumprod"]}, "mix2":{"seed":[2, 3], "gpu":[1,2]}}
#    gs_dict = {"mix":{"system":["EnKO", "FIVO", "IWAE"], "loss_type":["sumprod", "prodsum", "sumprod"], "gpu":[0,1,2]},
#               "outer_scale":[1.0, 2.0]}
#    gs_dict = {"mix":{"system":["EnKO", "FIVO", "IWAE"], "gpu":[0,1,2]}}
#    gs_dict = {"mix":{"system":["EnKO", "FIVO", "IWAE"], "loss_type":["sumprod", "prodsum", "sumprod"]},
#               "n_particles":[8,16,32], "seed":[1,2,3]}
#    gs_dict = {"inflation_method":["RTPP", "RTPS"], "inflation_factor":[0.1, 0.2, 0.3], "n_particles":[8,16,32],
#               "mix":{"seed":[1,2,3], "gpu":[0,1,2]}}
#    gs_dict = {"inflation_method":["RTPP", "RTPS"], "seed":[1,2,3], "mix":{"inflation_factor":[0.1, 0.2, 0.3], "gpu":[0,1,2]}}
#    gs_dict = {"seed":[2,3], "mix":{"inflation_method":["RTPP", "RTPS"], "gpu":[1,2]}, "inflation_factor":[0.1, 0.2, 0.3]}
#    gs_dict = {"mix":{"inflation_factor":[0.1, 0.2, 0.3], "gpu":[0,1,2]}}
#    gs_dict = {"mix":{"loss_type":["prodsum", "sumprod"], "exclude_filtering_on":[False, True], "gpu":[0,1]}}
#    gs_dict = {"mix":{"system":["EnKO", "EnKO", "FIVO", "IWAE"],
#               "loss_type":["sumprod", "prodsum", "prodsum", "sumprod"],
#               "exclude_filtering_on":[True, False, True, True], "gpu":[1,2,1,2]}}
#    gs_dict = {"kld_penalty_weight":[0.1, 1.]}
#    gs_dict = {"mix":{"kld_penalty_weight":[0, 1.], "gpu":[1,2]}, "inflation_factor":[0.2, 0.3]}
#    gs_dict = {"mix":{"only_outer_learning_epochs":[0,10,0,10], "pretrain_epochs":[0,0,10,20]}}
#    gs_dict = {"seed":[1,2,3]}
    gs_key = list(gs_dict.keys())  # list of keys for grid search
    gs_length = len(gs_dict)
    gs_key2 = []
    for key in gs_key:
        # if the dictionary has a hierarchical structure, add the inner keys to gs_key2
        if type(gs_dict[key])==list:
            gs_key2.append(key)
        elif type(gs_dict[key])==dict:
            gs_key2 += list(gs_dict[key].keys())

    topic_name = ""
    for topic in ["system", "outer_model", "model", "data_name"]:
        if not (topic in gs_key2):
            if not (topic in ["system", "outer_model"] and config["data"][topic] is None):
                topic_name += config["data"][topic] + "_"

    dir_name = "{}_{}gs{}/".format(date, topic_name, number_of_date)
    name_list = []
    config_list = []
    if direct_parse_strategy_on:
        parse_list = []
        initial_parse = []
        for key in gs_key2:
            initial_parse += ["--{}".format(key), None]

    def generate_queue_flatten_config(old_config, name, depth):
        key = gs_key[depth]

        if type(gs_dict[key])==list:
            for i, value in enumerate(gs_dict[key]):
                new_name = name
                new_config = copy.deepcopy(old_config)
                new_config[key] = value
                # abbreviate the key to the first letter of each underscore-separated part
                abbrev_list = key.split("_")
                for abbrev in abbrev_list:
                    new_name += abbrev[0]
                new_name += str(value)
                if depth+1 < gs_length:
                    generate_queue_flatten_config(new_config, new_name, depth+1)
                else:
                    config_list.append(new_config)
                    name_list.append(new_name)
        elif type(gs_dict[key])==dict:
            interlocking_key = list(gs_dict[key].keys())
            min_length = 10
            for ikey in interlocking_key:
                min_length = len(gs_dict[key][ikey]) if len(gs_dict[key][ikey]) < min_length else min_length
            for i in range(min_length):
                new_name = name
                new_config = copy.deepcopy(old_config)
                for ikey in interlocking_key:
                    new_config[ikey] = gs_dict[key][ikey][i]
                    abbrev_list = ikey.split("_")
                    for abbrev in abbrev_list:
                        new_name += abbrev[0]
                    new_name += str(gs_dict[key][ikey][i])
                if depth+1 < gs_length:
                    generate_queue_flatten_config(new_config, new_name, depth+1)
                else:
                    config_list.append(new_config)
                    name_list.append(new_name)
        else:
            raise ValueError("elements must be a list type object or a dict type object")

    def flatten_config_to_parse(config):
        parse_list = []
        for key in config.keys():
            parse_list.append("--{}".format(key))
            if type(config[key])==list:
                parse_list += [str(value) for value in config[key]]
            else:
                parse_list.append(str(config[key]))
        return parse_list

    def generate_queue_parse(old_parse, name, depth, total_depth):
        key = gs_key[depth]

        if type(gs_dict[key])==list:
            for i, value in enumerate(gs_dict[key]):
                new_name = name
                new_parse = copy.deepcopy(old_parse)
                new_parse[2*total_depth+1] = str(value)
                abbrev_list = key.split("_")
                for abbrev in abbrev_list:
                    new_name += abbrev[0]
                new_name += str(value)
                if depth+1 < gs_length:
                    generate_queue_parse(new_parse, new_name, depth+1, total_depth+1)
                else:
                    parse_list.append(new_parse)
                    name_list.append(new_name)
        elif type(gs_dict[key])==dict:
            inner_keys = list(gs_dict[key].keys())
            # arrange length of inner list
            min_length = 10
            for ikey in inner_keys:
                min_length = len(gs_dict[key][ikey]) if len(gs_dict[key][ikey]) < min_length else min_length
            for i in range(min_length):
                new_name = name
                new_parse = copy.deepcopy(old_parse)
                for j, ikey in enumerate(inner_keys):
                    new_parse[2*(total_depth+j)+1] = str(gs_dict[key][ikey][i])
                    abbrev_list = ikey.split("_")
                    for abbrev in abbrev_list:
                        new_name += abbrev[0]
                    new_name += str(gs_dict[key][ikey][i])
                if depth+1 < gs_length:
                    generate_queue_parse(new_parse, new_name, depth+1, total_depth+len(inner_keys))
                else:
                    parse_list.append(new_parse)
                    name_list.append(new_name)
        else:
            raise ValueError("elements must be a list type object or a dict type object")

    def generate_queue_config(old_config, name, depth):
        key = gs_key[depth]

        if type(gs_dict[key])==list:
            for i, value in enumerate(gs_dict[key]):
                new_name = name
                new_config = copy.deepcopy(old_config)
                for ckey in config.keys():
                    if key in config[ckey].keys():
                        new_config[ckey][key] = value
                        abbrev_list = key.split("_")
                        for abbrev in abbrev_list:
                            new_name += abbrev[0]
                        new_name += str(value)
                        break
                if depth+1 < gs_length:
                    generate_queue_config(new_config, new_name, depth+1)
                else:
                    config_list.append(new_config)
                    name_list.append(new_name)
                    #main(config, new_name)
        elif type(gs_dict[key])==dict:
            interlocking_key = list(gs_dict[key].keys())
            min_length = 10
            for ikey in interlocking_key:
                min_length = len(gs_dict[key][ikey]) if len(gs_dict[key][ikey]) < min_length else min_length
            for i in range(min_length):
                new_name = name
                new_config = copy.deepcopy(old_config)
                for ikey in interlocking_key:
                    for ckey in config.keys():
                        if ikey in config[ckey].keys():
                            new_config[ckey][ikey] = gs_dict[key][ikey][i]
                            abbrev_list = ikey.split("_")
                            for abbrev in abbrev_list:
                                new_name += abbrev[0]
                            new_name += str(gs_dict[key][ikey][i])
                            break
                if depth+1 < gs_length:
                    generate_queue_config(new_config, new_name, depth+1)
                else:
                    config_list.append(new_config)
                    name_list.append(new_name)
                    #main(config, new_name)
        else:
            raise ValueError("elements must be a list type object or a dict type object")

    if parallel_strategy_on:
        if direct_parse_strategy_on:
            generate_queue_flatten_config(flatten_config, dir_name, 0)
            total_parse_list = []
            for config_element, name_element in zip(config_list, name_list):
                total_parse_list.append(["python", "parse_svae.py"] + flatten_config_to_parse(config_element) + ["--name", name_element])
            print(total_parse_list)
#            generate_queue_parse(initial_parse, dir_name, 0, 0)
#            for parse_element, name_element in zip(parse_list, name_list):
#                total_parse_list.append(["python", "parse_svae.py"] + parse_element + ["--name", name_element])
        else:
            generate_queue_config(config, dir_name, 0)
    else:
        if direct_parse_strategy_on:
            generate_queue_parse(initial_parse, dir_name, 0, 0)
        else:
            generate_queue_flatten_config(flatten_config, dir_name, 0)

    if parallel_strategy_on:
        if direct_parse_strategy_on:
            for i in range((len(name_list)-1)//max_parallel_queues+1):
                p = mp.Pool(max_parallel_queues)
                p.map(subprocess.run, total_parse_list[max_parallel_queues*i:max_parallel_queues*(i+1)])
                p.close()
                if "gpu" in gs_key:
                    gpu_ids = gs_dict["gpu"]
                    memory_used = [int(get_gpu_info()[gpu_id]["memory.used"]) for gpu_id in gpu_ids]
                    while max(memory_used) > minimum_memory:
                        print("waiting in {}-th parallel computation".format(i+1))
                        time.sleep(10)
                        memory_used = [int(get_gpu_info()[gpu_id]["memory.used"]) for gpu_id in gpu_ids]
                else:
                    memory_used = int(get_gpu_info()[gpu_id]["memory.used"])
                    while memory_used > minimum_memory:
                        print("waiting in {}-th parallel computation".format(i+1))
                        time.sleep(10)
                        memory_used = int(get_gpu_info()[gpu_id]["memory.used"])
        else:
            for i in range((len(name_list)-1)//max_parallel_queues+1):
                p = mp.Pool(min(mp.cpu_count(), max_parallel_queues))
                p.starmap(main, zip(config_list[max_parallel_queues*i:max_parallel_queues*(i+1)], name_list[max_parallel_queues*i:max_parallel_queues*(i+1)]))
                p.close()
                memory_used = int(get_gpu_info()[gpu_id]["memory.used"])
                while memory_used > minimum_memory:
                    print("waiting")
                    time.sleep(10)
                    memory_used = int(get_gpu_info()[gpu_id]["memory.used"])
                #subprocess.run(["nvidia-smi"])
    else:
        if direct_parse_strategy_on:
            for parse_element, name_element in zip(parse_list, name_list):
                #print(name_element, parse_element)
                subprocess.run(["python", "parse_svae.py"] + parse_element + ["--name", name_element])
        else:
            for config_element, name_element in zip(config_list, name_list):
                parse_element = flatten_config_to_parse(config_element)
                #print(name_element, parse_element)
                subprocess.run(["python", "parse_svae.py"] + parse_element + ["--name", name_element])


if __name__ == "__main__":
    ...
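
In gs_svae.py, abbrev_list is simply key.split("_"): each grid-search key is shortened to the first letter of every underscore-separated part, and the parameter value is appended, so each run gets a compact, unique name. A minimal sketch of that naming scheme (the abbrev_name helper and the sample base name are illustrative, not part of the source):

def abbrev_name(base, key, value):
    # split the config key on underscores and keep the first letter of each part,
    # e.g. "loss_type" -> "lt"; then append the value itself
    abbrev_list = key.split("_")
    for abbrev in abbrev_list:
        base += abbrev[0]
    return base + str(value)

print(abbrev_name("210101_gs1/", "n_particles", 16))       # 210101_gs1/np16
print(abbrev_name("210101_gs1/", "loss_type", "sumprod"))  # 210101_gs1/ltsumprod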


twtt.py

Source: twtt.py (GitHub)


#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, csv, re, urllib2
from HTMLParser import HTMLParser
import NLPlib

tagger = NLPlib.NLPlib()

# ---------------------- Pre-processing Functions ----------------------

# All html tags and attributes (i.e., /<[^>]+>/) are removed
def twtt1(input):
    return html_matcher.sub("", input)

# Html character codes (i.e., &...;) are replaced with an ASCII equivalent
def twtt2(input):
    string = urllib2.unquote(input).decode('utf8', 'ignore')
    return h.unescape(string).encode(sys.getfilesystemencoding())

# All URLs (i.e., tokens beginning with http or www) are removed
def twtt3(input):
    return url_matcher.sub("", input)

# The first character in Twitter user names and hash tags (i.e., @ and #) is removed
def twtt4(input):
    return tag_matcher.sub("", input)

# Each sentence within a tweet is on its own line
def twtt5(input):
    input = input.strip()
    abbrev_list = [re.sub(r"\n", "", item) for item in list(abbrev.readlines())]
    abbrev_list.extend(['e.g.', 'i.e.'])
    sentence = re.split(r'\s+', input)
    for i in range(len(sentence)):
        if re.search(r"\.", sentence[i]):
            if not(sentence[i] in abbrev_list):
                if re.search("n't.", sentence[i]):
                    # mark the sentence break after the clitic token
                    sentence[i] = re.sub("n't.", "n't. \n", sentence[i])
                else:
                    if len(re.split(r"(\W+)(?=\w)", sentence[i])) < 3:
                        # sentence[i] = re.split(r"(\W+)(?=\w)", sentence[i])[0] + " \n" + "".join(re.split(r"(\W+)(?=\w)", sentence[i])[1:])
                        sentence[i] = sentence[i] + " \n"
                    else:
                        sentence[i] = "".join(re.split(r"(\W+)(?=\w)", sentence[i])[0:-1]) + " \n" + re.split(r"(\W+)(?=\w)", sentence[i])[-1]
        elif re.search(r"\!|\?", sentence[i]):
            # if sentence[i+1][0].isupper():
            sentence[i] = sentence[i] + " \n"
    if len(sentence) == 1 or len(sentence) == 0:
        pass
    elif sentence[len(sentence)-1][-1] == "\n":
        sentence[len(sentence)-1] = sentence[len(sentence)-1][0:-2]
    return re.sub(r"\n\s+", "\n", " ".join(sentence))

# Ellipsis (i.e., '...') and other kinds of multiple punctuation (e.g., '!!!') are not split
def twtt6(input):
    print("Already qualified in twtt5")

# Each token, including punctuation and clitics, is separated by spaces
def twtt7(input):
    abbrev_list = [re.sub(r"\n", "", item) for item in list(abbrev.readlines())]
    abbrev_list.extend(['e.g.', 'i.e.'])
    sentence = re.split(r" ", input)
    for i in range(len(sentence)):
        if re.search(r"\w\W|\W\w", sentence[i]):
            if not(sentence[i] in abbrev_list):
                if re.search(r"n't", sentence[i]):
                    if re.search("n't.", sentence[i]):
                        sentence[i] = re.sub("n't.", "n't .", sentence[i])
                    sentence[i] = re.sub("n't", " n't", sentence[i])
                elif re.search(r"\n", sentence[i]):
                    sentence[i] = " ".join(re.split(r"(\W+)(?=\w)", sentence[i]))
                    sentence[i] = " ".join(re.split(r"(?<=\w)(\W+)", sentence[i]))
                elif re.search(r"\$\d", sentence[i]):
                    sentence[i] = " ".join(re.split(r'(\$)', sentence[i]))
                elif re.search(r"\d\W\d", sentence[i]):
                    pass
                else:
                    sentence[i] = re.split(r'(\W)', sentence[i])[0] + " " + "".join(re.split(r'(\W)', sentence[i])[1:])
    return " ".join(sentence)

# Each token is tagged with its part-of-speech
def twtt8(input):
    global tagger
    sentence = re.split(r' ', input)
    tags = tagger.tag(sentence)
    result = []
    for word, tag in zip(sentence, tags):
        result.append(word + "/" + tag)
    return re.sub(r"\n/NN ", "\n", re.sub(r" /NN", "", " ".join(result)))

# Before each tweet is the demarcation '<A=#>', which occurs on its own line, where # is the numeric class of the tweet (0 or 4)
def twtt9(input, polarity):
    pol = "<A=" + polarity + ">\n"
    result = pol + input
    return result

# ---------------------- Main Function ----------------------
def main(input_file, student_id, output_file):
    # check if input args are valid
    try:
        id = int(student_id)
    except:
        print("Please input a valid student ID.")
    start_pos1 = id % 80 * 10000
    start_pos2 = 800000 + start_pos1
    with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
        raw = csv.reader(infile)
        data = list(raw)
        if output_file == 'test.twt':
            print(len(data))
            for line in data:
                outfile.write(twtt9(twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(line[5].strip()))))))), line[0]))
                outfile.write("\n")
        else:
            for line in data[start_pos1 : start_pos1+10000]:
                outfile.write(twtt9(twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(line[5].strip()))))))), line[0]))
                outfile.write("\n")
            for line in data[start_pos2 : start_pos2+10000]:
                outfile.write(twtt9(twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(line[5].strip()))))))), line[0]))
                outfile.write("\n")

if __name__ == "__main__":
    h = HTMLParser()
    htmlCase = r'<[^>]+>'
    html_matcher = re.compile(htmlCase)
    urlCase = r"http\S+|www\S+|Http\S+|WWW\S+"
    url_matcher = re.compile(urlCase)
    tagCase = r"@|#"
    tag_matcher = re.compile(tagCase)
    with open("../Wordlists/abbrev.english", 'r') as abbrev:
        # check if the number of input args is 3
        try:
            main(sys.argv[1], sys.argv[2], sys.argv[3])
        except:
            print("Valid Format: python twtt.py [input_filename] [Student ID] [output_filename]")
    # test = 'Http://bit.ly/PACattack'
    # # print twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(test)))))))
    # print twtt9(twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(test))))))), '0')
    # print twtt8("sdjfh jaskfjd , jhj she i 've said, she 's are n't ! $ 100,000. ? dogs ' ' something good ' ??? ... j s")
    # test = 'Meet me today at the FEC in DC at 4. Wear a carnation so I know it’s you. <a href="Http://bit.ly/PACattack" target="_blank" class="tweet-url web" rel="nofollow">Http://bit.ly/PACattack</a>.'
    # print twtt9(twtt8(twtt7(twtt5(twtt4(twtt3(twtt2(twtt1(test))))))), '0')
    # print(twtt1('<p>Http://bit.ly/PACattack</p>'))


get_census_geographies.py

Source: get_census_geographies.py (GitHub)


1"""2download geographies from the 2011 tiger line census files3unzip in folders4https://www2.census.gov/geo/tiger/TIGER2011/PLACE/tl_2011_01_place.zip 5https://www2.census.gov/geo/tiger/TIGER2011/COUSUB/tl_2011_01_cousub.zip6- place (done)7- county subdivisions (done)8- blocks (done)9- state shape (done)10- project all to 3857 (state shape)11- and create difference of state shape (diff) place12"""13# importing required modules14import pandas as pd15import geopandas as gpd16import urllib17import zipfile18import time19import os20# read crosswalk21state_fips = pd.read_csv('D:/GIS data/county_fips/state_fips.csv')22state_fips['fips'] = state_fips['fips'].map(lambda x:str(x).zfill(2))23fips_list = list(state_fips['fips'])24abbrev_list = list(state_fips['abbrev'])25cwd = os.getcwd()26cwd27os.chdir('D:/GIS data/')28#download 2011 places29for i in range(0, len(abbrev_list)):30 print (abbrev_list[i], fips_list[i])31 url = "https://www2.census.gov/geo/tiger/TIGER2011/PLACE/tl_2011_{0}_place.zip".format(fips_list[i])32 extract_dir = "./{0}/Census/tl_2011_{0}_place/".format(abbrev_list[i])33 if not os.path.exists(extract_dir):34 os.makedirs(extract_dir)35 36 zip_path, _ = urllib.request.urlretrieve(url)37 with zipfile.ZipFile(zip_path, "r") as f:38 f.extractall(extract_dir)39 40#download 2011 county subdivisions41for i in range(0, len(abbrev_list)):42 print (abbrev_list[i], fips_list[i])43 url = "https://www2.census.gov/geo/tiger/TIGER2011/COUSUB/tl_2011_{0}_cousub.zip".format(fips_list[i])44 extract_dir = "./{0}/Census/tl_2011_{0}_cousub/".format(abbrev_list[i])45 if not os.path.exists(extract_dir):46 os.makedirs(extract_dir)47 zip_path, _ = urllib.request.urlretrieve(url)48 with zipfile.ZipFile(zip_path, "r") as f:49 f.extractall(extract_dir)50 51#download 2010 counties52for i in range(0, len(abbrev_list)):53 print (abbrev_list[i], fips_list[i])54 url = "https://www2.census.gov/geo/tiger/TIGER2010/COUNTY/2010/tl_2010_{0}_county10.zip".format(fips_list[i])55 extract_dir = "./{0}/Census/tl_2010_{1}_county10/".format(abbrev_list[i], fips_list[i])56 if not os.path.exists(extract_dir):57 os.makedirs(extract_dir)58 zip_path, _ = urllib.request.urlretrieve(url)59 with zipfile.ZipFile(zip_path, "r") as f:60 f.extractall(extract_dir)61#download 2010 states62for i in range(0, len(abbrev_list)):63 print (abbrev_list[i], fips_list[i])64 url = "https://www2.census.gov/geo/tiger/TIGER2010/STATE/2010/tl_2010_{0}_state10.zip".format(fips_list[i])65 extract_dir = "./{0}/Census/tl_2010_{1}_state10/".format(abbrev_list[i], fips_list[i])66 if not os.path.exists(extract_dir):67 os.makedirs(extract_dir)68 zip_path, _ = urllib.request.urlretrieve(url)69 with zipfile.ZipFile(zip_path, "r") as f:70 f.extractall(extract_dir)71#download 2019 block groups72for i in range(0, len(abbrev_list)):73 print (abbrev_list[i], fips_list[i])74 url = "https://www2.census.gov/geo/tiger/TIGER2019/BG/tl_2019_{0}_bg.zip".format(fips_list[i])75 extract_dir = "./{0}/Census/tl_2019_{0}_bg/".format(abbrev_list[i])76 if not os.path.exists(extract_dir):77 os.makedirs(extract_dir)78 zip_path, _ = urllib.request.urlretrieve(url)79 with zipfile.ZipFile(zip_path, "r") as f:80 f.extractall(extract_dir)81 82#download 2020 block groups83for i in range(0, len(abbrev_list)):84 print (abbrev_list[i], fips_list[i])85 url = "https://www2.census.gov/geo/tiger/TIGER2020/BG/tl_2020_{0}_bg.zip".format(fips_list[i])86 extract_dir = "./{0}/Census/tl_2020_{0}_bg/".format(abbrev_list[i])87 if not os.path.exists(extract_dir):88 os.makedirs(extract_dir)89 zip_path, _ = 
urllib.request.urlretrieve(url)90 with zipfile.ZipFile(zip_path, "r") as f:91 f.extractall(extract_dir)92 93 94t0 = time.time() 95# download 2010 blocks96# averages ~3.5 minutes per state97for i in range(42, len(abbrev_list)): # SD - VA98 print (abbrev_list[i], fips_list[i])99 100 url = "https://www2.census.gov/geo/tiger/TIGER2020/TABBLOCK/tl_2020_{0}_tabblock10.zip".format(fips_list[i])101 extract_dir = "./{0}/Census/tl_2020_{0}_tabblock10/".format(abbrev_list[i])102 if not os.path.exists(extract_dir):103 os.makedirs(extract_dir)104 105 zip_path, _ = urllib.request.urlretrieve(url)106 with zipfile.ZipFile(zip_path, "r") as f:107 f.extractall(extract_dir)108t1 = time.time()109elapsed = t1-t0 110t0 = time.time()111# for each state, project places and county subdivisions and state shapes to EPSG 3857 pseudo mercator112for i in range(41,50) :113 st_name = abbrev_list[i]114 st_fips = fips_list[i]115 116 print(st_name, st_fips)117 118 print("read files")119 state_dir = "D:/GIS data/{0}".format(abbrev_list[i])120 place = gpd.read_file("{0}/tl_2011_{1}_place.shp".format(state_dir,fips_list[i]))121 cousub = gpd.read_file("{0}/tl_2011_{1}_cousub.shp".format(state_dir,fips_list[i]))122 st_shape = gpd.read_file("{0}/{1}_TL_2019_STATE_proj.shp".format(state_dir, abbrev_list[i]))123 124 print("change crs")125 place = place.to_crs(st_shape.crs)126 cousub = cousub.to_crs(st_shape.crs)127 128 place.to_file("{0}/tl_2011_{1}_place_proj.shp".format(state_dir,fips_list[i]))129 cousub.to_file("{0}/tl_2011_{1}_cousub_proj.shp".format(state_dir,fips_list[i]))130 131 print("spatial difference")132 place_out = gpd.overlay(st_shape, place, how="difference")133 place_all = place.append(place_out)134 place_all.to_file("{0}/tl_2011_{1}_place_all.shp".format(state_dir,fips_list[i]))135 136t1 = time.time()137total = t1-t0138 ...
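
Every download loop in this script differs only in the URL and folder templates, so the abbrev_list/fips_list pairing could be factored into a single helper. A sketch under the assumption of the same state crosswalk (the two-state lists and the download_tiger helper are illustrative, not part of the source):

import os
import urllib.request
import zipfile

# illustrative two-state crosswalk; the script builds these from state_fips.csv
fips_list = ["01", "02"]
abbrev_list = ["AL", "AK"]

def download_tiger(url_template, dir_template):
    # one TIGER/Line zip per state: the FIPS code fills the URL,
    # the state abbreviation names the extraction folder
    for abbrev, fips in zip(abbrev_list, fips_list):
        url = url_template.format(fips)
        extract_dir = dir_template.format(abbrev)
        os.makedirs(extract_dir, exist_ok=True)
        zip_path, _ = urllib.request.urlretrieve(url)
        with zipfile.ZipFile(zip_path, "r") as f:
            f.extractall(extract_dir)

download_tiger(
    "https://www2.census.gov/geo/tiger/TIGER2011/PLACE/tl_2011_{0}_place.zip",
    "./{0}/Census/tl_2011_{0}_place/",
)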


