How to use the pop_path method in Slash

Best Python code snippets using slash. Note that none of the snippets below call a Slash API named pop_path; in each of them, pop_path is an ordinary variable name (a filesystem path or a populate-path object).
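
If you want to exercise such a helper from a Slash test, here is a minimal sketch. The file name test_download.py, the imports, and the assertion are hypothetical; they assume the download.py snippet further down this page is importable as `download` and that `rf.path_population()` returns a pathlib.Path, as that snippet implies.

```python
# test_download.py -- a minimal Slash-style sketch, not taken from any of the
# repositories on this page. Assumes the download.py snippet below is
# importable as `download` and rf.path_population() returns a pathlib.Path.
import refurbish as rf

import download


def test_population_file_is_downloaded():
    # Slash collects plain test_* functions from test_*.py files.
    download.download_population()
    pop_path = rf.path_population()
    assert pop_path.exists(), f"expected {pop_path} to exist after the download"
```

Slash typically discovers test_* functions in test_*.py files, so a test like this would be run with `slash run test_download.py`.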

__init__.py

Source: __init__.py (GitHub)


```python
from collections import deque
import pydash as _
from aiostream import stream
import config
import motor.motor_asyncio

mongo_client = motor.motor_asyncio.AsyncIOMotorClient(config.CONFIG.MONGO.get('uri'))
db = mongo_client.get_database()


# Describes one population (join) step parsed from a pop_fields entry.
class PopulatePath:
    def __init__(self, pop):
        self.local_field = pop.get('local_field')
        self.let = pop.get('let')
        self.from_ = pop.get('from')
        self.foreign_collection = db[self.from_]
        self.foreign_field = pop.get('foreign_field')
        self.filter_ = pop.get('filter') or {}
        self.projection = pop.get('projection')
        self.as_ = pop.get('as')
        self.pop_fields = pop.get('pop_field')


class BaseModel(motor.motor_asyncio.AsyncIOMotorCollection):
    def populates(self, pop_fields, filter_=None, projection=None, sort=None, skip=0, limit=0, batch_size=1000):
        pop_paths = self._gen_pop_pathes(pop_fields)
        cursor = self.find(filter=filter_, projection=projection, sort=sort, skip=skip, limit=limit,
                           batch_size=batch_size)
        return self._f(pop_paths, cursor, batch_size)

    # Flattens nested pop_fields definitions breadth-first into PopulatePath objects.
    def _gen_pop_pathes(self, pop_fields):
        pop_list, pop_paths = deque(), []
        pop_list.extend(pop_fields.values())
        while len(pop_list):
            pop = pop_list.popleft()
            if pop:
                pop_paths.append(PopulatePath(pop))
                if 'pop_fields' in pop:
                    pop_list.extend(pop.get('pop_fields').values())
        return pop_paths

    async def _f(self, pop_paths, cursor, batch_size):
        async with stream.chunks(cursor, batch_size).stream() as chunks:
            async for chunk in chunks:
                l_docs = chunk
                for pop_path in pop_paths:
                    l_docs = await self._populate(l_docs, pop_path)
                for doc in l_docs:
                    yield doc

    async def _populate(self, l_docs: list, pop_path: PopulatePath):
        if pop_path.let:
            # Field preprocessing
            def _t(doc, k, expr):
                v = expr(doc)
                if v:
                    doc = _.set_(doc, k, v)
                return doc
            l_docs = [_t(doc, k, expr) for k, expr in pop_path.let.items() for doc in l_docs]
        if pop_path.from_ not in await db.list_collection_names():
            raise Exception(f'Collection {pop_path.from_} does NOT exist')
        l_keys = set([_.get(doc, pop_path.local_field) for doc in l_docs if _.has(doc, pop_path.local_field)])
        if not l_keys or len(l_keys) <= 0:
            return l_docs
        f_docs = {doc.get('_id'): doc async for doc in pop_path.foreign_collection.find(
            filter={**pop_path.filter_, **{pop_path.foreign_field: {'$in': list(l_keys)}}},
            projection=pop_path.projection)}
        del l_keys  # Release memory
        for doc in l_docs:
            _k = _.get(doc, pop_path.local_field)
            if _k in f_docs:
                doc.update(_.set_({}, pop_path.as_, f_docs.get(_k)))
        return l_docs


_model_mapping = {
    'merchant': 'merchants',
    'impression_track': 'impressiontracks',
    'act_share_detail': 'actsharedetails',
}
for k, v in _model_mapping.items():
    obj = db[v]
    obj.__class__ = BaseModel
    locals()[k] = obj
...
```
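
A usage sketch for the snippet above, assuming the package containing this __init__.py is importable as `models` (a hypothetical name) and that `config.CONFIG.MONGO` points at a reachable MongoDB instance. The collection and field names in `pop_fields` are invented; they only illustrate the shape that `_gen_pop_pathes` and `PopulatePath` expect.

```python
# Hypothetical usage of BaseModel.populates(); the `models` package name and
# the field names below are placeholders, not part of the original module.
import asyncio

from models import impression_track  # one of the collections bound via _model_mapping


async def main():
    pop_fields = {
        'merchant_info': {
            'local_field': 'merchant_id',  # value read from each local document
            'from': 'merchants',           # foreign collection to join against
            'foreign_field': '_id',        # field matched with the local values
            'as': 'merchant',              # key under which the joined doc is stored
        }
    }
    # populates() returns an async generator that yields populated documents.
    async for doc in impression_track.populates(pop_fields, filter_={}, limit=10):
        print(doc)


asyncio.run(main())
```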


download.py

Source: download.py (GitHub)


```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging as log

import urllib3

from BaseCM import cm_hddcdd

import refurbish as rf

logging = log.getLogger("cm-refurbish/download.py")
logging.setLevel(log.DEBUG)


def download_building_stock():
    csv_path = rf.path_building_stock()
    if not csv_path.exists():
        csv_url = (
            "https://gitlab.com"
            "/hotmaps/building-stock/-/raw/master/data/building_stock.csv"
        )
        http = urllib3.PoolManager()
        logging.info(
            f"Downloading building stock data\n - from: {csv_url}\n - to: {csv_path}"
        )
        with http.request("GET", csv_url, preload_content=False) as csv_req, open(
            csv_path, "b+w"
        ) as csv_file:
            csv_file.write(csv_req.read())
        logging.info("Done!")
    else:
        logging.info(
            f"Building stock file already exists: {csv_path}. Download SKIPPED!"
        )


def download_population():
    pop_path = rf.path_population()
    if not pop_path.exists():
        pop_url = (
            "https://gisco-services.ec.europa.eu"
            "/distribution/v2/lau/geojson/LAU_RG_01M_2020_4326.geojson"
        )
        http = urllib3.PoolManager()
        logging.info(
            f"Downloading population data\n - from: {pop_url}\n - to: {pop_path}"
        )
        with http.request("GET", pop_url, preload_content=False) as pop_req, open(
            pop_path, "b+w"
        ) as pop_file:
            pop_file.write(pop_req.read())
        logging.info("Done!")
    else:
        logging.info(f"Population file already exists: {pop_path}. Download SKIPPED!")


def download_tabula_Umean():
    tab_path = rf.path_tabula_Umean()
    if not tab_path.exists():
        tab_url = (
            "https://gitlab.inf.unibz.it"
            "/URS/enermaps/tabula/-/raw/main/data/tabula-umean.csv"
        )
        http = urllib3.PoolManager()
        logging.info(
            f"Downloading tabula data\n - from: {tab_url}\n - to: {tab_path}"
        )
        with http.request("GET", tab_url, preload_content=False) as tab_req, open(
            tab_path, "b+w"
        ) as tab_file:
            tab_file.write(tab_req.read())
        logging.info("Done!")
    else:
        logging.info(f"Tabula file already exists: {tab_path}. Download SKIPPED!")


def download_datasets():
    """Download the data sets required by the CM."""
    # download HDDs and CDDs data set if not already available
    # breakpoint()
    cm_hddcdd.download_data()
    # download the datasets required by the refurbish CM.
    bstk_path = rf.path_building_stock()
    logging.info(f"{bstk_path}: {bstk_path.exists()}")
    download_building_stock()
    logging.info(f"{bstk_path}: {bstk_path.exists()}")
    pop_path = rf.path_population()
    logging.info(f"{pop_path}: {pop_path.exists()}")
    download_population()
    logging.info(f"{pop_path}: {pop_path.exists()}")
    tab_path = rf.path_tabula_Umean()
    logging.info(f"{tab_path}: {tab_path.exists()}")
    download_tabula_Umean()
    logging.info(f"{tab_path}: {tab_path.exists()}")


if __name__ == "__main__":
    ...
```
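
The three download_* functions above repeat the same check-download-write pattern with urllib3. Purely as an illustration (not part of the original module), that pattern could be factored into one helper; the name fetch_to_file and its signature are invented here.

```python
# Refactoring sketch only; behaviour mirrors the download_* functions above.
import urllib3


def fetch_to_file(url, dest_path, label, logger):
    """Download `url` into `dest_path` unless the file already exists."""
    if dest_path.exists():
        logger.info(f"{label} file already exists: {dest_path}. Download SKIPPED!")
        return
    http = urllib3.PoolManager()
    logger.info(f"Downloading {label} data\n - from: {url}\n - to: {dest_path}")
    # preload_content=False streams the response body instead of buffering it eagerly.
    with http.request("GET", url, preload_content=False) as req, open(
        dest_path, "wb"  # the original opens with "b+w"; for a plain write this is equivalent
    ) as out:
        out.write(req.read())
    logger.info("Done!")
```

With such a helper, download_population() would reduce to fetch_to_file(pop_url, rf.path_population(), "Population", logging), and likewise for the building stock and tabula files.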


performance_utils.py

Source: performance_utils.py (GitHub)


```python
# Standard Library
from datetime import datetime
from os import listdir
from os.path import isfile, join
from pathlib import Path
from time import time
from typing import List, Tuple

# Local Module
from ga.config import GAconfig
from ga.main import genetic_algorithm
from ga.representation.base import GeneOperator


def ga_testing(
    config: GAconfig, file_logger, func_name: str, need_record=True
):
    start = time()
    populations, fitnesses = genetic_algorithm(config)
    end = time()
    now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    execution_sec = end - start
    last_pop, last_fitness = populations[-1], fitnesses[-1]
    max_fitness = max(last_fitness)
    max_idx = last_fitness.index(max_fitness)
    strongest_individual = last_pop[max_idx]
    strongest_phenotype = config.setting.gene_operator.decode(
        strongest_individual
    )
    aggregated_metric: Tuple = (
        now,
        execution_sec,
        *strongest_phenotype,
        max_fitness,
    )
    metric: Tuple = tuple(str(i) for i in aggregated_metric)
    file_logger.info(
        ' '.join(
            [
                f'{last_pop=}',
                f'{last_fitness=}',
                f'{strongest_individual=}',
                f'{strongest_phenotype=}',
                f'{max_fitness=}',
                f'{max_idx=}',
            ]
        )
    )
    if need_record:
        write_metric(func_name, metric, populations, fitnesses, config)
    return populations, fitnesses


def write_metric(
    func_name: str,
    metric: Tuple,
    populations: List,
    fitnesses: List,
    config: GAconfig,
):
    operator: GeneOperator = config.setting.gene_operator
    pop_path = f'./performance/{func_name}/populations'
    fitness_path = f'./performance/{func_name}/fitnesses'
    Path(pop_path).mkdir(parents=True, exist_ok=True)
    Path(fitness_path).mkdir(parents=True, exist_ok=True)
    pop_filename = [f for f in listdir(pop_path) if isfile(join(pop_path, f))]
    fitness_filename = [
        f for f in listdir(fitness_path) if isfile(join(fitness_path, f))
    ]
    if pop_filename == []:
        pop_filename = ['0.csv']
    if fitness_filename == []:
        fitness_filename = ['0.csv']
    processed_pop_filename = [int(p.replace('.csv', '')) for p in pop_filename]
    processed_fitness_filename = [
        int(p.replace('.csv', '')) for p in fitness_filename
    ]
    max_value: int = max(
        [max(processed_pop_filename), max(processed_fitness_filename)]
    )
    now_idx = max_value + 1
    with open(f'./performance/{func_name}/metric.csv', 'a') as wf:
        wf.write(','.join(metric))
        wf.write('\n')
    with open(f'{pop_path}/{now_idx}.csv', 'w') as wf:
        for p in populations:
            individuals = [operator.format(i) for i in p]
            line = ','.join(individuals)
            wf.write(line)
            wf.write('\n')
    with open(f'{fitness_path}/{now_idx}.csv', 'w') as wf:
        for f in fitnesses:
            line = ','.join(str(i) for i in f)
            wf.write(line)
...
```
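
A usage sketch for ga_testing showing how a file logger might be wired up. Building a real GAconfig depends on the ga package, which is not shown here, so the config is passed in by the caller; the module name performance_utils and the func_name label "onemax" are assumptions for illustration only.

```python
# Usage sketch only; the import paths and the "onemax" label are assumed.
import logging

from ga.config import GAconfig
from performance_utils import ga_testing


def make_file_logger(path: str) -> logging.Logger:
    """Plain stdlib logger that writes ga_testing's summary line to a file."""
    logger = logging.getLogger("ga-performance")
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.FileHandler(path))
    return logger


def run_experiment(config: GAconfig) -> None:
    file_logger = make_file_logger("performance.log")
    # With need_record=True (the default), write_metric appends a row to
    # ./performance/onemax/metric.csv and writes numbered population/fitness CSVs.
    populations, fitnesses = ga_testing(config, file_logger, func_name="onemax")
    file_logger.info(f"{len(populations)=} {len(fitnesses)=}")
```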


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
