How to use the get_running_container_names function in LocalStack

Best Python code snippet using localstack_python

utils.py

Source:utils.py Github

copy

Full Screen

...95 elapsed_in_s = now - last_time96 if elapsed_in_s < cached_s:97 recalc = False98 if recalc:99 names = get_running_container_names()100 CACHE[key] = {"last_time": now, "names": names}101 else:102 names = CACHE[key]["names"]103 return names104def get_stdout_file_in_container(directory: str, outfile_name: str = "") -> str:105 settings = get_replik_settings(directory)106 project_name = settings["name"]107 now = datetime.now()108 dt_string = now.strftime("%Y%m%d_%H%M%S")109 if len(outfile_name) == 0:110 return f"/home/user/{project_name}/.rp/logs/stdout_{dt_string}.log"111 else:112 return f"/home/user/{project_name}/.rp/logs/{outfile_name}_{dt_string}.log"113def get_free_resources_cached(cached_s=2.0, debug=False):114 """get resources is expensive! Query only once in a short time!"""115 global CACHE116 key = "get_free_resources"117 now = time()118 recalc = True119 if key in CACHE:120 last_time = CACHE[key]["last_time"]121 elapsed_in_s = now - last_time122 if elapsed_in_s < cached_s:123 recalc = False124 if recalc:125 if debug:126 print("[debug|get_free_resources_cached] recalc!")127 free_cpu, free_mem, free_gpus = get_free_resources(debug)128 CACHE[key] = {129 "last_time": now,130 "free_cpu": free_cpu,131 "free_mem": free_mem,132 "free_gpus": free_gpus,133 }134 else:135 if debug:136 print("[debug|get_free_resources_cached] cached!")137 free_cpu = CACHE[key]["free_cpu"]138 free_mem = CACHE[key]["free_mem"]139 free_gpus = CACHE[key]["free_gpus"]140 return free_cpu, free_mem, free_gpus141def get_free_resources(debug: bool):142 free_cpu = get_ncpu()143 free_mem, _ = get_memory()144 for proc in get_currently_running_docker_procs(debug=debug):145 free_cpu -= proc.cpu146 free_mem -= proc.mem147 free_gpus = get_free_gpu_device_ids()148 return free_cpu, free_mem, free_gpus149def get_unique_id(settings):150 """151 unique id for this run152 """153 rand = random.uniform(0.1, 2.5)154 txt = f"{settings['tag']}_{time()}_{rand}"155 return txt156def get_frozen_image_path(directory: 
str) -> str:157 """158 Path to the frozen image159 """160 info = utils.get_replik_settings(directory)161 project_name = info["name"]162 return join(get_dockerdir(directory), f"frozen_{project_name}_image.tar")163def get_dockerdir(directory: str) -> str:164 return join(directory, "docker")165def get_tempdockerdir(directory: str) -> str:166 unique_name = f"{time()}"167 path = join(directory, f".cache/rp/docker/{unique_name}")168 return path169def replik_root_file(directory: str) -> str:170 """171 {root}/.rp172 """173 return join(directory, ".rp/info.json")174def is_replik_project(directory: str) -> bool:175 """"""176 return isfile(replik_root_file(directory))177def get_replik_settings(directory: str) -> Dict:178 """"""179 if not is_replik_project(directory):180 console.fail(f"Directory {directory} is no rp project")181 exit(0) # exit program182 replik_fname = replik_root_file(directory)183 with open(replik_fname, "r") as f:184 return json.load(f)185def get_ncpu():186 return multiprocessing.cpu_count()187def get_memory():188 mem = psutil.virtual_memory()189 total = mem.total / (1024.0 ** 3)190 available = mem.available / (1024.0 ** 3) * 0.95191 return int(total), int(available)192def get_paths_for_mapping(directory):193 assert is_replik_project(directory=directory)194 fname = get_paths_fname(directory)195 with open(fname, "r") as f:196 return json.load(f)197def kill_container(name: str):198 """"""199 return call(f"docker kill {name}", shell=True)200def get_running_container_names():201 """202 get the names of all currently running rp containers203 """204 return [205 f.replace('"', "")206 for f in (207 subprocess.run(208 ["docker", "ps", "--format", '"{{.Names}}"'], stdout=subprocess.PIPE209 )210 .stdout.decode("utf-8")211 .lower()212 .split("\n")213 )214 if len(f) > 0 and f.replace('"', "").startswith("rp")215 ]216def datetime_from_utc_to_local(utc_datetime):217 now_timestamp = time()218 offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(219 
now_timestamp220 )221 return utc_datetime + offset222def get_currently_running_docker_procs(debug: bool) -> List[RunningProcess]:223 running_processes = []224 for container_name in get_running_container_names():225 if debug:226 print(227 f"[debug|get_currently_running_docker_procs] try to check container {container_name}"228 )229 _start = time()230 try:231 dev = subprocess.run(232 [233 "docker",234 "inspect",235 "--format='{{json .HostConfig}}'",236 container_name,237 ],238 stdout=subprocess.PIPE,239 ).stdout.decode("utf-8")[1:-2]240 image_name = subprocess.run(241 [242 "docker",243 "inspect",244 "--format='{{json .Config.Image}}'",245 container_name,246 ],247 stdout=subprocess.PIPE,248 ).stdout.decode("utf-8")[2:-3]249 start_time = subprocess.run(250 [251 "docker",252 "inspect",253 "--format='{{json .State.StartedAt}}'",254 container_name,255 ],256 stdout=subprocess.PIPE,257 ).stdout.decode("utf-8")[1:-2]258 start_time = start_time.replace('"', "").replace("T", " ")259 end_pt = start_time.find(260 "."261 ) # the nanosec confuse the converter and they don't matter anyways262 start_time = start_time[:end_pt]263 start_time = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")264 start_time = datetime_from_utc_to_local(start_time).timestamp()265 container = json.loads(dev)266 cpu = container["NanoCpus"] / 1000000000267 shm_size = container["ShmSize"] / (1024 ** 3)268 mem = container["Memory"] / (1024 ** 3)269 if container["DeviceRequests"] is None:270 gpu_device_ids = None271 else:272 gpu_device_ids = container["DeviceRequests"][0]["DeviceIDs"]273 device_ids = []274 if gpu_device_ids is not None:275 for did in gpu_device_ids:276 device_ids.append(int(did))277 running_processes.append(278 RunningProcess(279 cpu=cpu,280 mem=mem,281 shm_size=shm_size,282 gpu_devices=device_ids,283 docker_name=container_name,284 image_name=image_name,285 start_time=start_time,286 )287 )288 if debug:289 print(290 f"[debug|get_currently_running_docker_procs] add running proc 
{container_name}"291 )292 print(f"\tcpu:{cpu} mem:{mem}, gpus:{device_ids}")293 print("\telapsed", time() - _start)294 except:295 print(296 f"[debug|get_currently_running_docker_procs] failed to load {container_name}"297 )298 return list(sorted(running_processes, key=lambda p: p.start_time))299def get_gpus():300 GPU_WHITELIST = [301 "GeForce RTX 2080 Ti",302 "GeForce RTX 3090",303 "GeForce GTX 1080 Ti",304 "TITAN RTX",305 ]306 gpu_uid_to_device_id = {}307 gpus = {} # device_id -> {}308 for device_id, query in enumerate(309 [310 f311 for f in subprocess.run(312 ["nvidia-smi", "--query-gpu=gpu_name,gpu_uuid", "--format=csv"],313 stdout=subprocess.PIPE,314 )315 .stdout.decode("utf-8")316 .split("\n")317 if len(f) > 0 and not f.startswith("name")318 ]319 ):320 query = query.split(", ")321 name = query[0]322 uuid = query[1]323 gpu_uid_to_device_id[uuid] = device_id324 gpus[device_id] = {"name": name, "in_use": False, "by": None, "uuid": uuid}325 for container_name in get_running_container_names():326 dev = subprocess.run(327 [328 "docker",329 "inspect",330 "--format='{{json .HostConfig.DeviceRequests}}'",331 container_name,332 ],333 stdout=subprocess.PIPE,334 ).stdout.decode("utf-8")335 if dev == "null" or dev == "'null'" or dev is None:336 pass # no GPU for this container!337 else:338 try:339 dev = str(dev)[1:-2]...

Full Screen

Full Screen

memory_checker

Source:memory_checker Github

copy

Full Screen

...74 else:75 syslog.syslog(syslog.LOG_ERR, "[memory_checker] Failed to retrieve memory value from '{}'"76 .format(mem_usage))77 sys.exit(4)78def get_running_container_names():79 """Retrieves names of running containers by talking to the docker daemon.80 Args:81 None.82 Returns:83 running_container_names: A list indicates names of running containers.84 """85 try:86 docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock')87 running_container_list = docker_client.containers.list(filters={"status": "running"})88 running_container_names = [ container.name for container in running_container_list ]89 except (docker.errors.APIError, docker.errors.DockerException) as err:90 syslog.syslog(syslog.LOG_ERR,91 "Failed to retrieve the running container list from docker daemon! Error message is: '{}'"92 .format(err))93 sys.exit(5)94 return running_container_names95def main():96 parser = argparse.ArgumentParser(description="Check memory usage of a container \97 and an alerting message will be written into syslog if memory usage \98 is larger than the threshold value", usage="/usr/bin/memory_checker <container_name> <threshold_value_in_bytes>")99 parser.add_argument("container_name", help="container name")100 # TODO: Currently the threshold value is hard coded as a command line argument and will101 # remove this in the new version since we want to read this value from 'CONFIG_DB'.102 parser.add_argument("threshold_value", type=int, help="threshold value in bytes")103 args = parser.parse_args()104 running_container_names = get_running_container_names()105 if args.container_name in running_container_names:106 check_memory_usage(args.container_name, args.threshold_value)107 else:108 syslog.syslog(syslog.LOG_INFO,109 "[memory_checker] Exits without checking memory usage since container '{}' is not running!"110 .format(args.container_name))111if __name__ == "__main__":...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful