How to use the num_machines method in autotest

Best Python code snippet using autotest_python

launch.py

Source: launch.py (GitHub)


#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Code are based on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Megvii, Inc. and its affiliates.
from loguru import logger
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import yolox.utils.dist as comm
from yolox.utils import configure_nccl
import os
import subprocess
import sys
import time

__all__ = ["launch"]


def _find_free_port():
    """
    Find an available port of current machine / node.
    """
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binding to port 0 will cause the OS to find an available port for us
    sock.bind(("", 0))
    port = sock.getsockname()[1]
    sock.close()
    # NOTE: there is still a chance the port could be taken by other processes.
    return port


def launch(
    main_func,
    num_gpus_per_machine,
    num_machines=1,
    machine_rank=0,
    backend="nccl",
    dist_url=None,
    args=(),
):
    """
    Args:
        main_func: a function that will be called by `main_func(*args)`
        num_machines (int): the total number of machines
        machine_rank (int): the rank of this machine (one per machine)
        dist_url (str): url to connect to for distributed training, including protocol
            e.g. "tcp://127.0.0.1:8686".
            Can be set to auto to automatically select a free port on localhost
        args (tuple): arguments passed to main_func
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size > 1:
        if int(os.environ.get("WORLD_SIZE", "1")) > 1:
            dist_url = "{}:{}".format(
                os.environ.get("MASTER_ADDR", None),
                os.environ.get("MASTER_PORT", "None"),
            )
            local_rank = int(os.environ.get("LOCAL_RANK", "0"))
            world_size = int(os.environ.get("WORLD_SIZE", "1"))
            _distributed_worker(
                local_rank,
                main_func,
                world_size,
                num_gpus_per_machine,
                num_machines,
                machine_rank,
                backend,
                dist_url,
                args,
            )
            exit()
        launch_by_subprocess(
            sys.argv,
            world_size,
            num_machines,
            machine_rank,
            num_gpus_per_machine,
            dist_url,
            args,
        )
    else:
        main_func(*args)


def launch_by_subprocess(
    raw_argv,
    world_size,
    num_machines,
    machine_rank,
    num_gpus_per_machine,
    dist_url,
    args,
):
    assert (
        world_size > 1
    ), "subprocess mode doesn't support single GPU, use spawn mode instead"

    if dist_url is None:
        # ------------------------hack for multi-machine training -------------------- #
        if num_machines > 1:
            master_ip = subprocess.check_output(["hostname", "--fqdn"]).decode("utf-8")
            master_ip = str(master_ip).strip()
            dist_url = "tcp://{}".format(master_ip)
            ip_add_file = "./" + args[1].experiment_name + "_ip_add.txt"
            if machine_rank == 0:
                port = _find_free_port()
                with open(ip_add_file, "w") as ip_add:
                    ip_add.write(dist_url + '\n')
                    ip_add.write(str(port))
            else:
                while not os.path.exists(ip_add_file):
                    time.sleep(0.5)

                with open(ip_add_file, "r") as ip_add:
                    dist_url = ip_add.readline().strip()
                    port = ip_add.readline()
        else:
            dist_url = "tcp://127.0.0.1"
            port = _find_free_port()

    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = dist_url
    current_env["MASTER_PORT"] = str(port)
    current_env["WORLD_SIZE"] = str(world_size)
    assert num_gpus_per_machine <= torch.cuda.device_count()

    if "OMP_NUM_THREADS" not in os.environ and num_gpus_per_machine > 1:
        current_env["OMP_NUM_THREADS"] = str(1)
        logger.info(
            "\n*****************************************\n"
            "Setting OMP_NUM_THREADS environment variable for each process "
            "to be {} in default, to avoid your system being overloaded, "
            "please further tune the variable for optimal performance in "
            "your application as needed. \n"
            "*****************************************".format(
                current_env["OMP_NUM_THREADS"]
            )
        )

    processes = []
    for local_rank in range(0, num_gpus_per_machine):
        # each process's rank
        dist_rank = machine_rank * num_gpus_per_machine + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)

        # spawn the processes
        cmd = ["python3", *raw_argv]

        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)

    for process in processes:
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)


def _distributed_worker(
    local_rank,
    main_func,
    world_size,
    num_gpus_per_machine,
    num_machines,
    machine_rank,
    backend,
    dist_url,
    args,
):
    assert (
        torch.cuda.is_available()
    ), "cuda is not available. Please check your installation."
    configure_nccl()
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    logger.info("Rank {} initialization finished.".format(global_rank))
    try:
        dist.init_process_group(
            backend=backend,
            init_method=dist_url,
            world_size=world_size,
            rank=global_rank,
        )
    except Exception:
        logger.error("Process group URL: {}".format(dist_url))
        raise

    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    comm.synchronize()

    if global_rank == 0 and os.path.exists(
        "./" + args[1].experiment_name + "_ip_add.txt"
    ):
        os.remove("./" + args[1].experiment_name + "_ip_add.txt")

    assert num_gpus_per_machine <= torch.cuda.device_count()
    torch.cuda.set_device(local_rank)

    args[1].local_rank = local_rank
    args[1].num_machines = num_machines

    # Setup the local process group (which contains ranks within the same machine)
    # assert comm._LOCAL_PROCESS_GROUP is None
    # num_machines = world_size // num_gpus_per_machine
    # for i in range(num_machines):
    #     ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
    #     pg = dist.new_group(ranks_on_i)
    #     if i == machine_rank:
    #         comm._LOCAL_PROCESS_GROUP = pg
...


factorycalc.py

Source: factorycalc.py (GitHub)


# Calculates how many machines and resources are needed to produce given products
# Starts in main()
# Set goal to whatever products you wish to produce
# Output in out.json
# Author: Samuel Johansson
import json

f = open("recipes.json", "r")
RECIPES = json.loads(f.read())
to_make = {}


def handle_requirements(goal_name, goal_amount, level, recipe_version="default"):
    if goal_name in RECIPES:
        # goal_recipe expected to look like:
        # {"production": ##, "requirements": {"RESOURCE1": ##, ...}, "machine": "MACHINE_NAME", "by_products": {"BYPRODUCT1": ##, ...}}
        goal_recipe = RECIPES[goal_name][recipe_version]
        goal_production_ratio = goal_amount / goal_recipe["production"]  # how much is needed over how much one machine can produce
        tabs = " " * level  # for a nicer print
        print(tabs, "Need", round(goal_production_ratio, 2), goal_name, goal_recipe["machine"] + "(s) for", goal_amount, "per min")
        if goal_recipe["requirements"] is not None:
            for r_key, r_val in goal_recipe["requirements"].items():  # loop through each requirement (get name/id and amount needed)
                rec_version = "alt1" if r_key == "Screw" else "default"  # use the alt1 recipe instead of default for all Screw production
                requirement_recipe = RECIPES[r_key][rec_version]
                r_goal = r_val * goal_production_ratio
                num_machines = r_goal / requirement_recipe["production"]
                # Add required product to the list if new, else update values (may save on machines if requirements overlap)
                if r_key not in to_make:
                    to_make[r_key] = {"total": r_goal, "machine": requirement_recipe["machine"], "num_machines": num_machines, "for_" + goal_name: r_goal}
                else:
                    to_make[r_key]["total"] += r_goal
                    num_machines = to_make[r_key]["total"] / requirement_recipe["production"]
                    to_make[r_key]["num_machines"] = num_machines
                    if "for_" + goal_name not in to_make[r_key]:
                        to_make[r_key]["for_" + goal_name] = r_goal
                    else:
                        to_make[r_key]["for_" + goal_name] += r_goal

                # Add by-products to the list
                if "by_products" in requirement_recipe and requirement_recipe["by_products"] is not None:
                    for by_key, by_val in requirement_recipe["by_products"].items():
                        if by_key not in to_make:
                            to_make[by_key] = {"total": by_val * num_machines, "as_by_product": by_val * num_machines}
                        else:
                            to_make[by_key]["total"] += by_val * num_machines
                            if "as_by_product" in to_make[by_key]:
                                to_make[by_key]["as_by_product"] += by_val * num_machines
                            else:
                                to_make[by_key]["as_by_product"] = by_val * num_machines
                handle_requirements(r_key, r_goal, level + 1, "alt1" if r_key == "Screw" else "default")  # Recursion <3

        else:
            print(tabs, "We're at the end!")
    else:
        print("Could not find recipe for", goal_name)


def main():
    # Set goal to whatever products you wish to produce
    goal = {"Screw": 40}

    global to_make
    to_make = dict(goal)
    # Format goal for the to_make dict
    for k, v in to_make.items():
        recipe_version = "alt1" if k == "Screw" else "default"
        to_make[k] = {"total": v, "machine": RECIPES[k][recipe_version]["machine"], "num_machines": v / RECIPES[k][recipe_version]["production"]}

    # main loop
    for goal_key, goal_value in goal.items():
        print("To produce", goal_value, goal_key + ":")
        # TODO: Better alt-recipe handling
        recipe_version = "alt1" if goal_key == "Screw" else "default"
        handle_requirements(goal_key, goal_value, 1, recipe_version)

    outfile = open("out.json", "w")
    outfile.write(json.dumps(to_make))
    print(to_make)


if __name__ == "__main__":
...
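
In factorycalc.py, num_machines is a derived quantity rather than a method: for every product it is the demanded rate per minute divided by how much one machine produces per minute (the recipe's "production" value), and it is recomputed whenever overlapping requirements raise the total. A rough sketch of that calculation is below; the recipe names and production numbers are made-up placeholders, not values from the real recipes.json.

# Hypothetical recipes mirroring the structure handle_requirements() expects.
# All names and numbers here are illustrative placeholders.
RECIPES = {
    "Screw": {
        "default": {"production": 40, "requirements": {"Iron Rod": 10},
                    "machine": "Constructor", "by_products": None},
        "alt1": {"production": 50, "requirements": {"Iron Ingot": 12.5},
                 "machine": "Constructor", "by_products": None},
    },
    "Iron Ingot": {
        "default": {"production": 30, "requirements": None,
                    "machine": "Smelter", "by_products": None},
    },
}

goal_amount = 40                                   # Screws needed per minute
recipe = RECIPES["Screw"]["alt1"]                  # the script forces alt1 for Screw
num_machines = goal_amount / recipe["production"]  # 40 / 50 = 0.8 machines
print(round(num_machines, 2), recipe["machine"] + "(s) needed")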


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
