How to use the log_last_traceback method in autotest

Best Python code snippet using autotest_python
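
The snippets below come from a project's util.py and cli.py modules that wrap the Docker API. log_last_traceback is deliberately tiny: it formats the traceback of the exception currently being handled and writes it to the module logger, so the caller can swallow the exception without losing its details. A minimal, self-contained sketch of the same idea, using only the standard logging and traceback modules (the failing operation here is made up for illustration):

import logging
import traceback

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def log_last_traceback():
    # format_exc() renders the exception currently being handled,
    # so call this only from inside an "except" block
    logger.error(traceback.format_exc())

try:
    {}["missing"]            # hypothetical failing operation
except KeyError:
    log_last_traceback()     # the full traceback lands in the log; execution continues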

util.py

Source: util.py (GitHub)

...
    try:
        return s.decode("utf-8")
    except AttributeError:
        return s

def log_last_traceback():
    logger.error(traceback.format_exc())

def log_traceback(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("function %s is about to be started", func)
        try:
            response = func(*args, **kwargs)
        except Exception:
            log_last_traceback()
        else:
            logger.info("function %s finished", func)
            # TODO: how long it took?
            return response
    return wrapper

def setup_dirs():
    """Make required directories to hold logfile.
    :returns: str
    """
    try:
        top_dir = os.path.abspath(os.path.expanduser(os.environ["XDG_CACHE_HOME"]))
    except KeyError:
        top_dir = os.path.abspath(os.path.expanduser("~/.cache"))
    our_cache_dir = os.path.join(top_dir, PROJECT_NAME)
    os.makedirs(our_cache_dir, mode=0o775, exist_ok=True)
    return our_cache_dir

def get_log_file_path():
    return os.path.join(setup_dirs(), LOG_FILE_NAME)

def humanize_bytes(bytesize, precision=2):
    """
    Humanize byte size figures
    https://gist.github.com/moird/3684595
    """
    abbrevs = (
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'bytes')
    )
    if bytesize == 1:
        return '1 byte'
    for factor, suffix in abbrevs:
        if bytesize >= factor:
            break
    if factor == 1:
        precision = 0
    return '%.*f %s' % (precision, bytesize / float(factor), suffix)

def humanize_time(value):
    abbrevs = (
        (1, "now"),
        (2, "{seconds} seconds ago"),
        (59, "{seconds} seconds ago"),
        (60, "{minutes} minute ago"),
        (119, "{minutes} minute ago"),
        (120, "{minutes} minutes ago"),
        (3599, "{minutes} minutes ago"),
        (3600, "{hours} hour ago"),
        (7199, "{hours} hour ago"),
        (86399, "{hours} hours ago"),
        (86400, "{days} day ago"),
        (172799, "{days} day ago"),
        (172800, "{days} days ago"),
        (172800, "{days} days ago"),
        (2591999, "{days} days ago"),
        (2592000, "{months} month ago"),
        (5183999, "{months} month ago"),
        (5184000, "{months} months ago"),
    )
    n = datetime.now()
    delta = n - value
    for guard, message in abbrevs:
        s = int(delta.total_seconds())
        if guard >= s:
            break
    return message.format(seconds=delta.seconds, minutes=int(delta.seconds // 60),
                          hours=int(delta.seconds // 3600), days=delta.days,
                          months=int(delta.days // 30))

# # This function is able to crash python b/c it may write monster-amount of data.
# # Use it only for debugging, do not ship it!
# def log_vars_from_tback(process_frames=5):
#     for th in threading.enumerate():
#         try:
#             thread_frames = sys._current_frames()[th.ident]
#         except KeyError:
#             continue
#         logger.debug(''.join(traceback.format_stack(thread_frames)))
#
#     logger.error(traceback.format_exc())
#     if process_frames <= 0:
#         return
#     tb = sys.exc_info()[2]
#     while 1:
#         if not tb.tb_next:
#             break
#         tb = tb.tb_next
#     stack = []
#     f = tb.tb_frame
#     while f:
#         stack.append(f)
#         f = f.f_back
#     for frame in stack[:process_frames]:
#         logger.debug("frame %s:%s", frame.f_code.co_filename, frame.f_lineno)
#         for key, value in frame.f_locals.items():
#             try:
#                 logger.debug("%20s = %s", key, value)
#             except Exception:
#                 logger.debug("%20s = CANNOT PRINT VALUE", key)
#
#         # self_instance = frame.f_locals.get("self", None)
#         # if not self_instance:
#         #     continue
#         # for key in dir(self_instance):
#         #     if key.startswith("__"):
#         #         continue
#         #     try:
#         #         value = getattr(self_instance, key, None)
#         #         logger.debug("%20s = %s", "self." + key, value)
#         #     except Exception:
#         #         logger.debug("%20s = CANNOT PRINT VALUE", "self." + key)

# this is taken directly from docker client:
# https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/client/stats.go#L309
def calculate_cpu_percent(d):
    cpu_count = len(d["cpu_stats"]["cpu_usage"]["percpu_usage"])
    cpu_percent = 0.0
    cpu_delta = float(d["cpu_stats"]["cpu_usage"]["total_usage"]) - \
        float(d["precpu_stats"]["cpu_usage"]["total_usage"])
    system_delta = float(d["cpu_stats"]["system_cpu_usage"]) - \
        float(d["precpu_stats"]["system_cpu_usage"])
    if system_delta > 0.0:
        cpu_percent = cpu_delta / system_delta * 100.0 * cpu_count
    return cpu_percent

# again taken directly from docker:
# https://github.com/docker/cli/blob/2bfac7fcdafeafbd2f450abb6d1bb3106e4f3ccb/cli/command/container/stats_helpers.go#L168
# precpu_stats in 1.13+ is completely broken, doesn't contain any values
def calculate_cpu_percent2(d, previous_cpu, previous_system):
    # import json
    # du = json.dumps(d, indent=2)
    # logger.debug("XXX: %s", du)
    cpu_percent = 0.0
    cpu_total = float(d["cpu_stats"]["cpu_usage"]["total_usage"])
    cpu_delta = cpu_total - previous_cpu
    cpu_system = float(d["cpu_stats"]["system_cpu_usage"])
    system_delta = cpu_system - previous_system
    online_cpus = d["cpu_stats"].get("online_cpus", len(d["cpu_stats"]["cpu_usage"].get("percpu_usage", [None])))
    if system_delta > 0.0:
        cpu_percent = (cpu_delta / system_delta) * online_cpus * 100.0
    return cpu_percent, cpu_system, cpu_total

def calculate_blkio_bytes(d):
    """
    :param d:
    :return: (read_bytes, wrote_bytes), ints
    """
    bytes_stats = graceful_chain_get(d, "blkio_stats", "io_service_bytes_recursive")
    if not bytes_stats:
        return 0, 0
    r = 0
    w = 0
    for s in bytes_stats:
        if s["op"] == "Read":
            r += s["value"]
        elif s["op"] == "Write":
            w += s["value"]
    return r, w

def calculate_network_bytes(d):
    """
    :param d:
    :return: (received_bytes, transceived_bytes), ints
    """
    networks = graceful_chain_get(d, "networks")
    if not networks:
        return 0, 0
    r = 0
    t = 0
    for if_name, data in networks.items():
        logger.debug("getting stats for interface %r", if_name)
        r += data["rx_bytes"]
        t += data["tx_bytes"]
    return r, t

def graceful_chain_get(d, *args, default=None):
    t = d
    for a in args:
        try:
            t = t[a]
        except (KeyError, ValueError, TypeError, AttributeError):
            logger.debug("can't get %r from %s", a, t)
            return default
    return t

def repeater(call, args=None, kwargs=None, retries=4):
    """
    repeat call x-times: docker API is just awesome
    :param call: function
    :param args: tuple, args for function
    :param kwargs: dict, kwargs for function
    :param retries: int, how many times we try?
    :return: response of the call
    """
    args = args or ()
    kwargs = kwargs or {}
    t = 1.0
    for x in range(retries):
        try:
            return call(*args, **kwargs)
        except APIError as ex:
            logger.error("query #%d: docker returned an error: %r", x, ex)
        except Exception as ex:
            # this may be pretty bad
            log_last_traceback()
            logger.error("query #%d: generic error: %r", x, ex)
        t *= 2
        time.sleep(t)

class OrderedSet(list):
    def append(self, p_object):
        if p_object in self:
            self.remove(p_object)
...
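
Assuming the excerpt above is saved as a module named util.py (a hypothetical name here) with its elided imports and logger in place, the log_traceback decorator is the most convenient way to route unexpected exceptions through log_last_traceback: the wrapper logs when the function starts, logs the full traceback if it raises, and returns None instead of propagating the error. The repeater helper uses the same function to record each failed retry. A usage sketch:

import logging

from util import log_traceback  # hypothetical module name for the snippet above

logging.basicConfig(level=logging.DEBUG)

@log_traceback
def parse_status(payload):
    # raises KeyError when "status" is missing; the decorator logs the traceback
    return payload["status"]

print(parse_status({"status": "ok"}))  # -> "ok", plus "started"/"finished" log lines
print(parse_status({}))                # -> None, traceback written via log_last_traceback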



cli.py

Source: cli.py (GitHub)

...
    except KeyboardInterrupt:
        print("Quitting on user request.")
        return 1
    except Exception as ex:  # pylint: disable=broad-except
        log_last_traceback()
        if args.debug:
            raise
        else:
            # TODO: improve this message to be more thorough
            print("There was an error during program execution, see logs for more info.")
            return 1
    return 0

if __name__ == "__main__":
...
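
The cli.py excerpt shows the typical place to call log_last_traceback in a console application: a broad top-level handler that logs the traceback, re-raises in debug mode, and otherwise prints a short message and returns a non-zero exit code. A self-contained sketch of that pattern, assuming a hypothetical --debug flag and a placeholder do_work() standing in for the real program logic:

import argparse
import logging
import sys
import traceback

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def log_last_traceback():
    logger.error(traceback.format_exc())

def do_work():
    raise RuntimeError("simulated failure")  # stand-in for the real application logic

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    try:
        do_work()
    except KeyboardInterrupt:
        print("Quitting on user request.")
        return 1
    except Exception:  # pylint: disable=broad-except
        log_last_traceback()
        if args.debug:
            raise  # in debug mode, let the traceback reach the terminal
        print("There was an error during program execution, see logs for more info.")
        return 1
    return 0

if __name__ == "__main__":
    sys.exit(main())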


