How to use the log_marker method in Slash

Best Python code snippets using Slash

build.py

Source: build.py (GitHub)


...
    # safe to crank up debian-cd's verbosity so that the logs are most
    # useful.
    config["VERBOSE"] = "3"
    return log_path

def log_marker(message):
    logger.info("===== %s =====" % message)
    logger.info(time.strftime("%a %b %e %H:%M:%S UTC %Y", time.gmtime()))

def want_live_builds(options):
    return options is not None and getattr(options, "live", False)

...

def sync_local_mirror(config, multipidfile_state):
    if config["CDIMAGE_NOSYNC"]:
        return
    capproject = config.capproject
    sync_lock = os.path.join(config.root, "etc", ".lock-archive-sync")
    if not multipidfile_state:
        log_marker("Syncing %s mirror" % capproject)
        # Acquire lock to allow parallel builds to ensure a consistent
        # archive.
        try:
            subprocess.check_call(["lockfile", "-r", "4", sync_lock])
        except subprocess.CalledProcessError:
            logger.error("Couldn't acquire archive sync lock!")
            raise
        try:
            anonftpsync(config)
        finally:
            osextras.unlink_force(sync_lock)
    else:
        log_marker(
            "Parallel build; waiting for %s mirror to sync" % capproject)
        try:
            subprocess.check_call(["lockfile", "-8", "-r", "450", sync_lock])
        except subprocess.CalledProcessError:
            logger.error("Timed out waiting for archive sync lock!")
            raise
        osextras.unlink_force(sync_lock)

...

def build_image_set_locked(config, options, multipidfile_state):
    image_type = config.image_type
    config["CDIMAGE_DATE"] = date = next_build_id(config, image_type)
    log_path = None
    try:
        configure_for_project(config)
        log_path = open_log(config)
        if want_live_builds(options):
            log_marker("Building live filesystems")
            live_successful = run_live_builds(config)
            config.limit_arches(live_successful)
        else:
            tracker_set_rebuild_status(config, [0, 1], 2)
        if not is_live_fs_only(config):
            sync_local_mirror(config, multipidfile_state)
            if config["LOCAL"]:
                log_marker("Updating archive of local packages")
                update_local_indices(config)
            build_britney(config)
            log_marker("Extracting debootstrap scripts")
            extract_debootstrap(config)
        if config["UBUNTU_DEFAULTS_LOCALE"]:
            build_ubuntu_defaults_locale(config)
        elif is_live_fs_only(config):
            build_livecd_base(config)
        else:
            if not config["CDIMAGE_PREINSTALLED"]:
                log_marker("Germinating")
                germination = Germination(config)
                germination.run()
                log_marker("Generating new task lists")
                germinate_output = germination.output(config.project)
                germinate_output.write_tasks()
                log_marker("Checking for other task changes")
                germinate_output.update_tasks(date)
            if (config["CDIMAGE_LIVE"] or config["CDIMAGE_SQUASHFS_BASE"] or
                    config["CDIMAGE_PREINSTALLED"]):
                log_marker("Downloading live filesystem images")
                download_live_filesystems(config)
            configure_splash(config)
            run_debian_cd(config)
            fix_permissions(config)
            # Temporarily turned off for live builds.
            if (config["CDIMAGE_INSTALL_BASE"] and
                    not config["CDIMAGE_ADDON"] and
                    not config["CDIMAGE_PREINSTALLED"]):
                log_marker("Producing installability report")
                check_installable(config)
        if not config["DEBUG"] and not config["CDIMAGE_NOPUBLISH"]:
            log_marker("Publishing")
            tree = Tree.get_daily(config)
            publisher = Publisher.get_daily(tree, image_type)
            publisher.publish(date)
            log_marker("Purging old images")
            publisher.purge()
            log_marker("Triggering mirrors")
            trigger_mirrors(config)
        log_marker("Finished")
        return True
    except Exception as e:
        for line in traceback.format_exc().splitlines():
            logger.error(line)
        sys.stdout.flush()
        sys.stderr.flush()
        if not isinstance(e, LiveBuildsFailed):
            notify_failure(config, log_path)
        return False

class SignalExit(SystemExit):
    """A variant of SystemExit indicating receipt of a signal."""
    def __init__(self, signum):
        self.signum = signum

@contextlib.contextmanager
...


perf_test_runner.py

Source: perf_test_runner.py (GitHub)


#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of methods for running perf tests on the legacy browser."""
import collections
import fnmatch
import optparse
import pexpect
import random
import re
import sys
import time
import urlparse
import android_commands
from base_test_runner import BaseTestRunner
from perf_tests_helper import PrintPerfResult
from run_tests_helper import *

# Match logcat output that corresponds to console.log() in JavaScript.
LEGACY_BROWSER_CONSOLE_FORMAT_RE = '.*Console: %s: ([^\s]+).*'
CHROME_CONSOLE_FORMAT_RE = '.*INFO:CONSOLE.*"%s: ([^\s"]+)".*'
# Identify browser crashes in logcat.
ACTIVITY_CRASH_RE = 'ActivityManager: Process %s \(pid \d+\) has died.'
# Log marker controlling monitor of page flip count of Surface.
SURFACE_FPS_MONITOR_START = 'startSurfaceFpsMonitor'
SURFACE_FPS_MONITOR_STOP = 'stopSurfaceFpsMonitor'

class PerfTestRunner(BaseTestRunner):
  """Class for running performance tests.
  Args:
    device: Tests will run on the device of this ID.
  """
  TARGET_TRACE_FILE = '/sdcard/prof.dat'

  def __init__(self, device):
    BaseTestRunner.__init__(self, device, 0)
    self.trace = None

  ...

  def _RunPerfTest(self, browser_package, browser_activity,
                   browser_console_log_re, url, expected_results,
                   trace_tag='', browser_extras=None, timeout=30):
    """Runs a JavaScript based performance test.
    The results are printed to the console in a format suitable for the perfbot.
    Args:
      browser_package: The package of the browser to start (e.g.
        'com.google.android.apps.chrome').
      browser_activity: The activity of the browser to start (e.g. '.Main' or
        'com.google.android.apps.chrome.Main').
      browser_console_log_re: Regular expression string which identifies
        console.log output in adb logcat. Must contain a %s placeholder for
        the log_marker.
      url: The URL of the JavaScript performance test. The caller is responsible
        for ensuring this URL is accessible on the phone (either by copying
        locally or starting an HTTP server + forwarder).
      expected_results: A list of tuple of (log_marker, chart_name, trace_name,
        units). log_marker is usually shown as the graph name, with the
        exception of SURFACE_FPS_MONITOR_START/STOP which are used to control
        the monitor of page flip count of Surface.
      trace_tag: An optional tag string to append to all trace_names.
      browser_extras: Extra data to pass to the browser Intent.
      timeout: The browser is killed after this many seconds of inactivity.
    Returns:
      List of results if test ran successfully. None upon failure.
    """
    self.adb.StartMonitoringLogcat(timeout=timeout)
    self.adb.ClearApplicationState(browser_package)
    error_re = re.compile(ACTIVITY_CRASH_RE % re.escape(browser_package))
    results = []
    io_stats_before = self.adb.GetIoStats()
    self.StartupBrowser(browser_package, browser_activity,
                        token=str(random.randint(100000, 999999)), url=url,
                        browser_extras=browser_extras)
    try:
      for log_marker, chart_name, trace_name, units in expected_results:
        result_re_str = browser_console_log_re % re.escape(log_marker)
        result_re = re.compile(result_re_str)
        m = self.WaitForLogMatchOrPackageCrash(result_re, browser_package, url)
        if not m:
          return None
        # For certain tests, the result is a list enclosed in braces, as in:
        # '{3.134553, 40389443}'; remove these if we find them before
        # splitting, otherwise we'll get an error when converting result[0]
        # to a float below. Same for angle brackets which also happen.
        result = m.group(1)
        if len(result) > 2:
          if result[0] == '{' and result[-1] == '}':
            result = result[1:-1]
          elif result[0] == '[' and result[-1] == ']':
            result = result[1:-1]
        result = result.split(',')
        results.append(float(result[0]))
        if log_marker == SURFACE_FPS_MONITOR_START:
          surface_before = self.adb.GetSurfaceStats()
        elif log_marker == SURFACE_FPS_MONITOR_STOP:
          surface_after = self.adb.GetSurfaceStats()
          td = surface_after['timestamp'] - surface_before['timestamp']
          seconds = td.seconds + td.microseconds / 1e6
          print 'SurfaceMonitorTime: %fsecs' % seconds
          surface_fps = (surface_after['page_flip_count'] -
                         surface_before['page_flip_count']) / seconds
          PrintPerfResult('avg_surface_fps', 'avg_surface_fps' + trace_tag,
                          [int(round(surface_fps))], 'fps')
        else:
          PrintPerfResult(chart_name, trace_name + trace_tag, result, units)
      memory_usage = self.adb.GetMemoryUsage(browser_package)
      PrintPerfResult('pss_final_t', 'pss_final_t' + trace_tag,
                      [memory_usage['Pss']], 'kb')
      PrintPerfResult('private_dirty_final_t',
                      'private_dirty_final_t' + trace_tag,
                      [memory_usage['Private_Dirty']], 'kb')
      if 'Nvidia' in memory_usage:
        PrintPerfResult('nvidia_final_t', 'nvidia_final_t' + trace_tag,
                        [memory_usage['Nvidia']], 'kb')
      io_stats_after = self.adb.GetIoStats()
      for stat in io_stats_after:
        PrintPerfResult(stat, stat + trace_tag,
                        [io_stats_after[stat] - io_stats_before[stat]],
                        stat.split('_')[1])
    except pexpect.TIMEOUT:
      PerfTestRunner.OutputFailure(
          'Timed out after %d seconds while waiting for %s' % (timeout,
                                                               result_re_str))
      return None
    finally:
      self.adb.CloseApplication(browser_package)
...
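The key idea in this runner is that each expected result is announced in the page's console.log output under its log_marker, and the runner turns that marker into a result matcher via browser_console_log_re % re.escape(log_marker). A small sketch of that step in isolation; the logcat line below is invented for illustration and is not real device output:

import re

# Mirrors CHROME_CONSOLE_FORMAT_RE from the snippet (written as a raw string here).
CHROME_CONSOLE_FORMAT_RE = r'.*INFO:CONSOLE.*"%s: ([^\s"]+)".*'

log_marker = 'fling_speed'
result_re = re.compile(CHROME_CONSOLE_FORMAT_RE % re.escape(log_marker))

# Hypothetical logcat line produced by console.log('fling_speed: 42.5') in the test page.
line = '01-01 12:00:00.000 I/chromium( 1234): [INFO:CONSOLE(7)] "fling_speed: 42.5", source: http://test/perf.html (7)'
match = result_re.match(line)
print(match.group(1))  # -> 42.5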


test_wallets.py

Source: test_wallets.py (GitHub)


from random import Random
import re
from typing import Pattern, Tuple
import pytest
from rchain.crypto import PrivateKey
from docker.client import DockerClient
from .common import (
    CommandLineOptions,
    random_string,
)
from .conftest import (
    testing_context,
)
from .rnode import (
    Node,
    started_bootstrap_with_network,
)
from .common import (
    TestingContext,
    TransderFundsError,
)
from .wait import (
    wait_for_log_match_result,
    wait_for_log_match_result_raise,
    wait_for_approved_block_received_handler_state,
    WaitTimeoutError
)

ALICE_KEY = PrivateKey.from_hex("b2527b00340a83e302beae2a8daf6d654e8e57541acfa261cc1b5635eb16aa15")
BOB_KEY = PrivateKey.from_hex("9a801debae8bb97fe54c99389cafa576c60612503348578125b65ab182ff5850")
CHARLIE_KEY = PrivateKey.from_hex("567ea426deaeb8233f134c3a266149fb196d6eea7d28b447dfefff92002cb400")

def wait_transfer_result(context: TestingContext, node: Node, transfer_funds_result_pattern: Pattern) -> None:
    transfer_result_match = wait_for_log_match_result_raise(context, node, transfer_funds_result_pattern)
    reason = transfer_result_match.group('reason')
    if reason != "Nil":
        raise TransderFundsError(reason)

def deploy_transfer(log_marker: str, node: Node, from_rev_addr: str, to_rev_addr: str, amount: int, private_key: PrivateKey, phlo_limit: int, phlo_price: int) -> str:
    return node.deploy_contract_with_substitution(
        substitute_dict={"%FROM": from_rev_addr, "%TO": to_rev_addr, "%AMOUNT": str(amount), "%LOG_MARKER": log_marker},
        rho_file_path="resources/wallets/transfer_funds.rho",
        private_key=private_key,
        phlo_limit=phlo_limit,
        phlo_price=phlo_price
    )

def transfer_funds(context: TestingContext, node: Node, from_rev_addr: str, to_rev_addr: str, amount: int, private_key: PrivateKey, phlo_limit: int, phlo_price: int) -> None:
    """
    Transfer rev from one vault to another vault.
    If the transfer is processed successfully, it would return None.
    If the transfer fail to be processed, it would raise "TransferFundsError".
    """
    log_marker = random_string(context, 10)
    transfer_funds_result_pattern = re.compile('"{} (Successfully|Failing) reason: (?P<reason>[a-zA-Z0-9 ]*)"'.format(log_marker))
    deploy_transfer(log_marker, node, from_rev_addr, to_rev_addr, amount, private_key, phlo_limit, phlo_price)
    wait_transfer_result(context, node, transfer_funds_result_pattern)

def get_vault_balance(context: TestingContext, node: Node, rev_addr: str, private_key: PrivateKey, phlo_limit: int, phlo_price: int) -> Tuple[str, int]:
    log_marker = random_string(context, 10)
    check_balance_pattern = re.compile('"{} Vault (?P<rev_addr>[a-zA-Z0-9]*) balance is (?P<balance>[0-9]*)"'.format(log_marker))
    blockHash = node.deploy_contract_with_substitution(
        substitute_dict={"%REV_ADDR": rev_addr, "%LOG_MARKER": log_marker},
        rho_file_path="resources/wallets/get_vault_balance.rho",
        private_key=private_key,
        phlo_limit=phlo_limit,
        phlo_price=phlo_price
    )
    check_balance_match = wait_for_log_match_result(context, node, check_balance_pattern)
    return (blockHash, int(check_balance_match.group("balance")))

def test_alice_pay_bob(command_line_options: CommandLineOptions, docker_client: DockerClient, random_generator: Random) -> None:
    genesis_vault = {
        ALICE_KEY: 50000000
    }
    with testing_context(command_line_options, random_generator, docker_client, wallets_dict=genesis_vault) as context, \
            started_bootstrap_with_network(context=context) as bootstrap:
        wait_for_approved_block_received_handler_state(context, bootstrap)
        transfer_amount = 20000000
        alice_rev_address = ALICE_KEY.get_public_key().get_rev_address()
        bob_rev_address = BOB_KEY.get_public_key().get_rev_address()
        _, alice_balance = get_vault_balance(context, bootstrap, alice_rev_address, ALICE_KEY, 1000000, 1)
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, ALICE_KEY, 1000000, 1)
        assert alice_balance == 50000000 - 1000000
        assert bob_balance == 0
        transfer_funds(context, bootstrap, alice_rev_address, bob_rev_address, transfer_amount, ALICE_KEY, 1000000, 1)
        _, alice_balance = get_vault_balance(context, bootstrap, alice_rev_address, ALICE_KEY, 1000000, 1)
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, ALICE_KEY, 1000000, 1)
        assert bob_balance == transfer_amount

def test_transfer_failed_with_invalid_key(command_line_options: CommandLineOptions, docker_client: DockerClient, random_generator: Random) -> None:
    genesis_vault = {
        CHARLIE_KEY: 50000000,
        ALICE_KEY: 50000000
    }
    with testing_context(command_line_options, random_generator, docker_client, wallets_dict=genesis_vault) as context, \
            started_bootstrap_with_network(context=context) as bootstrap:
        wait_for_approved_block_received_handler_state(context, bootstrap)
        bob_rev_address = BOB_KEY.get_public_key().get_rev_address()
        charlie_rev_address = CHARLIE_KEY.get_public_key().get_rev_address()
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, CHARLIE_KEY, 1000000, 1)
        assert bob_balance == 0
        with pytest.raises(TransderFundsError) as e:
            transfer_funds(context, bootstrap, charlie_rev_address, bob_rev_address, 100, ALICE_KEY, 1000000, 1)
        assert e.value.reason == "Invalid AuthKey"
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, CHARLIE_KEY, 1000000, 1)
        assert bob_balance == 0

def test_transfer_failed_with_insufficient_funds(command_line_options: CommandLineOptions, docker_client: DockerClient, random_generator: Random) -> None:
    genesis_vault = {
        CHARLIE_KEY: 5000000,
        ALICE_KEY: 1000000
    }
    with testing_context(command_line_options, random_generator, docker_client, wallets_dict=genesis_vault) as context, \
            started_bootstrap_with_network(context=context) as bootstrap:
        wait_for_approved_block_received_handler_state(context, bootstrap)
        bob_rev_address = BOB_KEY.get_public_key().get_rev_address()
        alice_rev_address = ALICE_KEY.get_public_key().get_rev_address()
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, CHARLIE_KEY, 1000000, 1)
        _, alice_balance = get_vault_balance(context, bootstrap, alice_rev_address, CHARLIE_KEY, 1000000, 1)
        assert bob_balance == 0
        assert alice_balance < 2000000
        with pytest.raises(TransderFundsError) as e:
            transfer_funds(context, bootstrap, alice_rev_address, bob_rev_address, 2000000, ALICE_KEY, 1000000, 1)
        assert e.value.reason == "Insufficient funds"
        _, bob_balance = get_vault_balance(context, bootstrap, bob_rev_address, CHARLIE_KEY, 1000000, 1)
        assert bob_balance == 0

def test_transfer_to_not_exist_vault(command_line_options: CommandLineOptions, docker_client: DockerClient, random_generator: Random) -> None:
    genesis_vault = {
        CHARLIE_KEY: 500000000,
        ALICE_KEY: 500000000
    }
    not_exist_vault = PrivateKey.generate()
    with testing_context(command_line_options, random_generator, docker_client, wallets_dict=genesis_vault) as context, \
            started_bootstrap_with_network(context=context) as bootstrap:
        transfer_amount = 2000000
        wait_for_approved_block_received_handler_state(context, bootstrap)
        alice_rev_address = ALICE_KEY.get_public_key().get_rev_address()
        no_exist_address = not_exist_vault.get_public_key().get_rev_address()
        _, alice_balance = get_vault_balance(context, bootstrap, alice_rev_address, CHARLIE_KEY, 1000000, 1)
        assert alice_balance == 500000000
        with pytest.raises(WaitTimeoutError):
            # transfer to a vault which does not exist in the genesis vault
            # the result can not be got because the vault is not created in the tuplespace
            log_marker = random_string(context, 10)
            transfer_funds_result_pattern = re.compile('"{} (Successfully|Failing) reason: (?P<reason>[a-zA-Z0-9 ]*)"'.format(log_marker))
            deploy_transfer(log_marker, bootstrap, alice_rev_address, no_exist_address, transfer_amount, ALICE_KEY, 1000000, 1)
            wait_transfer_result(context, bootstrap, transfer_funds_result_pattern)
        # the get_vault_balance contract would call the method `findOrCreate` to generate the not-exist vault
        # then the transfer above can get the continuation and transfer is done
        _, no_vault_balance = get_vault_balance(context, bootstrap, no_exist_address, CHARLIE_KEY, 1000000, 1)
        wait_transfer_result(context, bootstrap, transfer_funds_result_pattern)
...
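Here log_marker is a random string baked into the deployed Rholang contract through the %LOG_MARKER substitution; the test then waits for a log line that repeats the marker, which keeps concurrent deploys from matching each other's output. A stripped-down sketch of the matching step, with the log line invented for illustration (random_marker stands in for the suite's random_string helper):

import random
import re
import string

def random_marker(length=10):
    # Stand-in for random_string(context, 10) used in the test suite.
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))

log_marker = random_marker()
pattern = re.compile('"{} (Successfully|Failing) reason: (?P<reason>[a-zA-Z0-9 ]*)"'.format(log_marker))

# Invented node log line of the kind the transfer_funds.rho contract emits.
line = '"{} Failing reason: Insufficient funds"'.format(log_marker)
match = pattern.search(line)
print(match.group('reason'))  # -> Insufficient funds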


whitelist.py

Source: whitelist.py (GitHub)


...
        self.ftp = FTP_marker()
        self.createpath = CREATE()
        # Check whether the directory exists; create the log file if it does not
        self.createpath.create_path(extra_path=self.config.get_value(section='LOGGER', key='LOG_PATH'))
        # self.logger1 = Log_marker().log_marker(log_path=self.config.get_value(section='LOGGER', key='LOG_PATH'),
        #                                        log_level=self.config.get_value(section='LOGGER', key='LOG_LEVEL'),
        #                                        log_name='apscheduler.scheduler')
        self.logger = Log_marker().log_marker(log_path=self.config.get_value(section='LOGGER', key='LOG_PATH'),
                                              log_level=self.config.get_value(section='LOGGER', key='LOG_LEVEL'),
                                              log_name=self.config.get_value(section='LOGGER', key='LOG_NAME'))

    def ftp_get_whitelist(self, local, remote, localp, remotep):
        result = self.ftp.ftpget(localfile=local,
                                 remotefile=remote, localpath=localp,
                                 remotepath=remotep)
        if result == 'get_ok':
            self.logger.info('%s File update success' % remote)
            if not os.path.isfile('/mds/aac/aac_V1.0.2/bin/restart_auth.sh'):
                self.logger.error('/mds/aac/aac_V1.0.2/bin/restart_auth.sh No such file or directory')
            else:
                os.popen("sh /mds/aac/aac_V1.0.2/bin/restart_auth.sh")
                self.logger.info('authentication Service restart success')
        # Example of writing to the database after a successful run
...
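The Log_marker class itself is not part of this excerpt, so its implementation is unknown; judging from the call site, log_marker() acts as a logger factory driven by config values. A plausible minimal sketch under that assumption (the file naming and format below are guesses, not the project's actual code):

import logging
import os

class Log_marker(object):
    def log_marker(self, log_path, log_level, log_name):
        # Return a file-backed logger configured from the given settings.
        os.makedirs(log_path, exist_ok=True)
        logger = logging.getLogger(log_name)
        logger.setLevel(getattr(logging, str(log_level).upper(), logging.INFO))
        if not logger.handlers:  # avoid adding duplicate handlers on repeated calls
            handler = logging.FileHandler(os.path.join(log_path, '%s.log' % log_name))
            handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
            logger.addHandler(handler)
        return logger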


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

