How to use the local_conf_dir method in Slash

Best Python code snippets using slash
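All of the snippets below come from projects that define a local configuration directory (local_conf_dir / LOCAL_CONF_DIR) and read, copy, or upload files from it. As a minimal, self-contained sketch of the general pattern (the names LOCAL_CONF_DIR and load_conf here are illustrative and not taken from any snippet below):

import os

# Illustrative only: resolve a conf/ directory next to this file and read one of its files.
LOCAL_CONF_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf')

def load_conf(filename):
    """Return the contents of a file stored in the local conf directory."""
    with open(os.path.join(LOCAL_CONF_DIR, filename)) as f:
        return f.read()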

fabfile.py

Source: fabfile.py (GitHub)


import datetime
import logging
import os
import sys
from fabric.api import env, lcd, task, sudo, local, prefix, cd, settings, require
from fabric.contrib.files import upload_template, contains, append, exists
from fabric.operations import put, prompt
from steepshot_io.deploy_settings import (
    USER, WEB_HOST, LANDING_HOST, REMOTE_DEPLOY_DIR, PROJECT_NAME, REPOSITORY,
    DEPLOY_DIR, UBUNTU_PACKAGES, WORKON_HOME, ENV_NAME, LOCAL_CONF_DIR,
    ENV_PATH, DATABASE_URL, DB_USER, DB_PASSWORD, DB_NAME,
    GUNI_PORT, GUNI_WORKERS, GUNI_TIMEOUT, GUNI_GRACEFUL_TIMEOUT,
    STATIC_ROOT, STATIC_URL, MEDIA_ROOT, MEDIA_URL,
    DEPLOYMENT_USER, DEPLOYMENT_GROUP, ENVIRONMENTS,
    USER_PROFILE_FILE, VENV_ACTIVATE,
    BACKEND_SERVICE, CELERY_SERVICE,
    WEBAPP_STATIC_DIR,
)

# This allows us to have .profile to be read when calling sudo
# and virtualenvwrapper being activated using non-SSH user
SUDO_PREFIX = 'sudo -i'
FRONTEND_LOCAL_DIR = os.path.abspath(os.path.join('..', 'steepshot-web'))
FRONTEND_BUILD_COMMAND = 'gulp build'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('fabfile')

def _get_current_datetime() -> str:
    now = datetime.datetime.now()
    return now.strftime('%d-%m-%Y_%H-%M-%S')

def _get_systemd_service_path(service_name):
    if not service_name.endswith(('.service', '.unit')):
        service_name += '.service'
    return '/etc/systemd/system/{}'.format(service_name)

def _load_environment(env_name: str):
    """
    Sets specified environment
    """
    if env_name not in ENVIRONMENTS:
        raise ValueError("Incorrect environment name ({}). "
                         "Valid options are: {}"
                         .format(env_name, ENVIRONMENTS.keys()))
    _env = ENVIRONMENTS[env_name]
    env.user = _env['USER']
    env.hosts = ["{host}:{port}".format(host=_env['HOST'],
                                        port=_env['SSH_PORT'])]
    env.host_url = _env['HOST']
    env.branch = _env['GIT_BRANCH']
    env.current_host = _env['CURRENT_HOST']
    env.env_name = env_name
    env.settings_module = _env['SETTINGS_MODULE']
    env.key_filename = _env['KEY_FILENAME']
    env.is_certbot_cert = _env.get('IS_CERTBOT_CERT')
    env.web_host = _env.get('WEBAPP_HOST', env.current_host)

@task
def create_non_priveledged_user():
    with settings(warn_only=True):
        sudo('adduser --disabled-login --gecos os {}'.format(DEPLOYMENT_USER))
        sudo('addgroup {}'.format(DEPLOYMENT_GROUP))
        sudo('adduser {user} {group}'
             .format(user=DEPLOYMENT_USER, group=DEPLOYMENT_GROUP))

@task
def prod():
    """
    Makes sure prod environment is enabled
    """
    _load_environment('PROD')

@task
def spa():
    _load_environment('SPA_PROD')

@task
def spa_qa():
    _load_environment('SPA_QA')

@task
def shell():
    os.execlp('ssh', '-C', '-i', env.key_filename, '%(user)s@%(host)s' % {'user': USER, 'host': env.HOST})

@task
def install_system_packages():
    # sudo('add-apt-repository ppa:fkrull/deadsnakes -y')
    sudo('add-apt-repository ppa:deadsnakes/ppa -y')
    if env.is_certbot_cert:
        sudo('add-apt-repository ppa:certbot/certbot -y')
    sudo('sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927')
    sudo('echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee '
         '/etc/apt/sources.list.d/mongodb-org-3.2.list')
    with settings(warn_only=True):
        sudo('apt-get update')
    if env.is_certbot_cert:
        sudo('apt-get -y --no-upgrade install %s' % ' '.join(UBUNTU_PACKAGES))
    else:
        UBUNTU_PACKAGES.remove('python-certbot-nginx')
        sudo('apt-get -y --no-upgrade install %s' % ' '.join(UBUNTU_PACKAGES))

@task
def checkout_repository():
    with cd(REMOTE_DEPLOY_DIR), settings(sudo_user=DEPLOYMENT_USER):
        # TODO: may be it is better to remove already
        # present directory
        if not exists(PROJECT_NAME, use_sudo=True):
            sudo('git clone %s %s' % (REPOSITORY, PROJECT_NAME))
        sudo('chown -R {user}:{group} {dir}'
             .format(user=DEPLOYMENT_USER,
                     group=DEPLOYMENT_GROUP,
                     dir=PROJECT_NAME))

@task
def create_deploy_dirs():
    with cd(DEPLOY_DIR):
        sudo('mkdir -p staticfiles logs pid uploads',
             user=DEPLOYMENT_USER)

@task
def enable_and_start_redis():
    """
    Enable and starts redis server
    """
    sudo('systemctl enable redis-server')
    sudo('systemctl start redis-server')

@task
def prepare():
    install_system_packages()
    checkout_repository()
    create_deploy_dirs()
    prepare_virtualenv()
    create_database()
    enable_and_start_redis()
    if exists('/etc/nginx/sites-available/default'):
        with settings(warn_only=True):
            sudo('rm /etc/nginx/sites-available/default')

def add_virtualenv_settings_to_profile(profile_file):
    if not exists(profile_file):
        logger.info("Creating user profile: {}".format(profile_file))
        sudo('touch %s' % profile_file,
             user=DEPLOYMENT_USER)
    lines_to_append = [
        'export WORKON_HOME=%s' % WORKON_HOME,
        'export PROJECT_HOME=%s' % REMOTE_DEPLOY_DIR,
        'source /usr/local/bin/virtualenvwrapper.sh',
    ]
    for line in lines_to_append:
        if not contains(profile_file, line):
            append(profile_file, '\n' + line,
                   use_sudo=True)
    sudo('chown {user}:{group} {file}'
         .format(user=DEPLOYMENT_USER,
                 group=DEPLOYMENT_GROUP,
                 file=profile_file))

@task
def prepare_virtualenv():
    logger.info("Setting up the virtual environment.")
    sudo('pip install virtualenv')
    sudo('pip install virtualenvwrapper')
    add_virtualenv_settings_to_profile(USER_PROFILE_FILE)
    with prefix('source %s' % USER_PROFILE_FILE):
        with settings(warn_only=True), cd(REMOTE_DEPLOY_DIR):
            logger.info("Creating new virualenv.")
            sudo('mkvirtualenv %s -p /usr/bin/python3.5' % ENV_NAME,
                 user=DEPLOYMENT_USER)
    config_virtualenv()

@task
def config_virtualenv():
    remote_postactivate_path = os.path.join(WORKON_HOME, ENV_NAME,
                                            'bin/postactivate')
    postactivate_context = {
        'DATABASE_URL': DATABASE_URL,
        'SETTINGS_MODULE': env.settings_module,
        'IS_CERTBOT_CERT': env.is_certbot_cert,
        'DOMAIN_NAME': env.current_host,
    }
    upload_template(os.path.join(LOCAL_CONF_DIR, 'postactivate'),
                    remote_postactivate_path, context=postactivate_context,
                    use_sudo=True)

@task
def create_database():
    """
    Create postgres database and dedicated user
    """
    logger.info("Setting the database.")
    with settings(warn_only=True):
        # Create database user
        with prefix("export PGPASSWORD=%s" % DB_PASSWORD):
            sudo('psql -c "CREATE ROLE %s WITH CREATEDB CREATEUSER LOGIN ENCRYPTED PASSWORD \'%s\';"' % (
                 DB_USER, DB_PASSWORD),
                 user='postgres')
            sudo('psql -c "CREATE DATABASE %s WITH OWNER %s"' % (DB_NAME, DB_USER),
                 user='postgres')

@task
def install_req():
    logger.info("Installing python requirements.")
    with cd(DEPLOY_DIR), prefix('source %s' % VENV_ACTIVATE):
        with settings(sudo_user=DEPLOYMENT_USER):
            cache_dir = os.path.join(DEPLOY_DIR, '.cache')
            sudo('pip install -U pip')
            # We avoid using cache as sometimes PIP does not
            # see new added requirements
            sudo('pip install --no-cache-dir -r {req_file}'
                 .format(cache=cache_dir, req_file='requirements.txt'))

@task
def deploy_files():
    with cd(DEPLOY_DIR), settings(sudo_user=DEPLOYMENT_USER):
        sudo('git fetch')
        sudo('git reset --hard')
        sudo('git checkout {}'.format(env.branch))
        sudo('git pull origin {}'.format(env.branch))

@task
def config_crontab():
    crontab_file = os.path.join(LOCAL_CONF_DIR, 'crontab')
    with settings(warn_only=True):
        # There may be no previous crontab so
        # crontab will fail
        backup_file = '/tmp/crontab-%s' % _get_current_datetime()
        logger.info("Backing up existing crontab")
        sudo('crontab -l > %s' % backup_file)
    logger.info("Uploading new crontab")
    put(crontab_file, '/tmp/new-crontab')
    logger.info("Setting new crontab")
    sudo('crontab < /tmp/new-crontab')

@task
def clean_pyc():
    """
    Cleans up redundant python bytecode files.
    """
    logger.info("Cleaning .pyc files.")
    with cd(DEPLOY_DIR):
        sudo("find . -name '*.pyc'")
        sudo('find . -name \*.pyc -delete')

@task
def migrate():
    with cd(DEPLOY_DIR):
        with settings(sudo_user=DEPLOYMENT_USER,
                      sudo_prefix=SUDO_PREFIX), prefix('workon steepshot_io'):
            sudo('python manage.py migrate')

def config_celery(remote_conf_path):
    """
    Copy celery related config files
    """
    require('settings_module')
    upload_template(os.path.join(LOCAL_CONF_DIR, 'celery.sh'),
                    remote_conf_path,
                    context={
                        'DEPLOY_DIR': DEPLOY_DIR,
                        'ENV_PATH': ENV_PATH,
                        'SETTINGS_MODULE': env.settings_module,
                    }, mode=0o0750, use_sudo=True)

def install_service(service_name, context):
    """
    Copies and enables specified systemd service
    to the remote server.
    """
    logger.info('Copying systemd services "%s"', service_name)
    remote_service = _get_systemd_service_path(service_name)
    local_template = os.path.join(LOCAL_CONF_DIR, service_name)
    if not os.path.exists(local_template):
        msg = 'Template "%s" does not exist.' % local_template
        logger.error(msg)
        raise ValueError(msg)
    upload_template(local_template,
                    remote_service,
                    context=context,
                    use_sudo=True,
                    backup=False)
    sudo('systemctl daemon-reload')
    # Autostart unit
    sudo('systemctl enable {}'.format(service_name))

@task
def install_systemd_services():
    services = (BACKEND_SERVICE, CELERY_SERVICE)
    common_context = {
        'PROJECT_NAME': PROJECT_NAME,
        'USER': DEPLOYMENT_USER,
        'GROUP': DEPLOYMENT_GROUP,
        'DEPLOY_DIR': DEPLOY_DIR,
    }
    for service in services:
        install_service(service, common_context)

@task
def deploy_nginx_config():
    require('host_url', 'env_name')
    remote_sa_path = '/etc/nginx/sites-available/%s' % PROJECT_NAME
    context = {
        'HOST': env.host_url,
        'CURRENT_HOST': env.current_host,
        'ENV': env.env_name,
        'DEPLOY_DIR': DEPLOY_DIR,
        'GUNI_PORT': GUNI_PORT,
        'STATIC_ROOT': STATIC_ROOT,
        'STATIC_URL': STATIC_URL,
        'MEDIA_ROOT': MEDIA_ROOT,
        'MEDIA_URL': MEDIA_URL
    }
    upload_template(template_dir=LOCAL_CONF_DIR,
                    filename='nginx.conf.j2',
                    destination=remote_sa_path,
                    context=context,
                    use_sudo=True,
                    use_jinja=True)
    sudo('ln -sf %s /etc/nginx/sites-enabled' % remote_sa_path)

@task
def config(restart_after=True):
    require('current_host', 'hosts', 'settings_module')
    # /etc/nginx/nginx.conf change user from www-data to root
    remote_conf_path = '%s/conf' % DEPLOY_DIR
    # remote_ssl_certificate_path = '/etc/ssl/certs'
    sudo('mkdir -p %s' % remote_conf_path,
         user=DEPLOYMENT_USER)
    GUNI_HOST = '0.0.0.0' if env.env_name == 'VAGRANT' else '127.0.0.1'
    upload_template(os.path.join(LOCAL_CONF_DIR, 'gunicorn.sh'), remote_conf_path, context={
        'DEPLOY_DIR': DEPLOY_DIR,
        'ENV_PATH': ENV_PATH,
        'SETTINGS_MODULE': env.settings_module,
        'GUNI_HOST': GUNI_HOST,
        'GUNI_PORT': GUNI_PORT,
        'GUNI_WORKERS': GUNI_WORKERS,
        'GUNI_TIMEOUT': GUNI_TIMEOUT,
        'GUNI_GRACEFUL_TIMEOUT': GUNI_GRACEFUL_TIMEOUT,
        'USER': DEPLOYMENT_USER,
        'GROUP': DEPLOYMENT_GROUP,
        'PROJECT_NAME': PROJECT_NAME
    }, mode=0o0750, use_sudo=True)
    config_celery(remote_conf_path)
    # TODO: replace it with systemd unit
    install_systemd_services()
    deploy_nginx_config()
    sudo('chown -R {}:{} {}'.format(DEPLOYMENT_USER, DEPLOYMENT_GROUP, remote_conf_path))
    # sudo('systemd daemon-reload')
    # path = os.path.join(LOCAL_CONF_DIR, 'ssl_certificate', 'www.steepshot.org.certchain.crt')
    # upload_template(path, remote_ssl_certificate_path, context={}, use_sudo=True),
    config_virtualenv()
    if restart_after:
        with settings(warn_only=True):
            restart()

def _is_systemd_service_running(service_name):
    with settings(warn_only=True):
        status_reply = sudo('systemctl --no-pager --full status %s'
                            % service_name)
        return 'inactive' not in status_reply

def restart_systemd_service(service_name):
    with settings(warn_only=True):
        if _is_systemd_service_running(service_name):
            sudo('systemctl stop %s' % service_name)
        sudo('systemctl start %s' % service_name)

@task
def restart():
    services_to_restart = [BACKEND_SERVICE,
                           CELERY_SERVICE]
    for service_name in services_to_restart:
        restart_systemd_service(service_name)
    sudo('service nginx restart')

@task
def restart_backend():
    sudo('systemctl stop %s' % BACKEND_SERVICE)
    sudo('systemctl start %s' % BACKEND_SERVICE)

@task
def restart_celery():
    sudo('systemctl stop %s' % CELERY_SERVICE)
    sudo('systemctl start %s' % CELERY_SERVICE)

@task
def check_status():
    services_to_check = [BACKEND_SERVICE,
                         CELERY_SERVICE]
    for service in services_to_check:
        sudo('systemctl --no-pager --full status %s' % service)

@task
def check_steepshot_service():
    sudo('systemctl --no-pager --full status %s' % BACKEND_SERVICE)

@task
def check_celery_service():
    sudo('systemctl --no-pager --full status %s' % CELERY_SERVICE)

@task
def deploy_static():
    """
    Collects django static files.
    """
    require('settings_module')
    with settings(sudo_user=DEPLOYMENT_USER,
                  sudo_prefix=SUDO_PREFIX), cd(DEPLOY_DIR):
        with prefix('workon %s' % ENV_NAME):
            sudo('python manage.py collectstatic --noinput --settings %s'
                 % env.settings_module)

@task
def update_static_chmod():
    sudo('chmod -R 664 %s' % STATIC_ROOT)
    sudo('chmod -R a+X %s' % STATIC_ROOT)
    sudo('chmod -R 664 %s' % MEDIA_ROOT)
    sudo('chmod -R a+X %s' % MEDIA_ROOT)

@task
def createsuperuser():
    require('settings_module')
    with settings(sudo_user=DEPLOYMENT_USER,
                  sudo_prefix=SUDO_PREFIX):
        with prefix('workon %s' % ENV_NAME):
            sudo('python manage.py createsuperuser '
                 '--settings ' + env.settings_module)

@task
def build_spa():
    if not os.path.exists(FRONTEND_LOCAL_DIR):
        logger.warning('Could not find repository '
                       'of the frontend application, '
                       'please clone it under the required '
                       'dir ("%s")', FRONTEND_LOCAL_DIR)
        sys.exit(1)
    # with lcd(FRONTEND_LOCAL_DIR):
    #     perform_checkout = prompt('We are going to checkout the local frontend repository '
    #                               'to the revision "{}". This may cause data loss, make '
    #                               'sure you\'ve staged your uncommited changes. Checkout? (y/n)'
    #                               .format(env.branch),
    #                               validate=r'(y|n)',
    #                               default='n')
    #     if perform_checkout.lower() == 'n':
    #         logger.warning('Exiting.')
    #         exit(1)
    #     local('git fetch --all')
    #     local('git reset --hard')
    #     local('git checkout {}'.format(env.branch))
    #     local('git pull origin {}'.format(env.branch))
    #     local(FRONTEND_BUILD_COMMAND)

@task
def copy_spa():
    """
    Copies artifacts created
    after the front-end build
    """
    with lcd(FRONTEND_LOCAL_DIR):
        with settings(sudo_user=DEPLOYMENT_USER,
                      sudo_prefix=SUDO_PREFIX):
            put('dist/*', WEBAPP_STATIC_DIR)

@task
def deploy_spa_nginx_config():
    remote_sa_path = '/etc/nginx/sites-available/%s' % env.web_host
    context = {
        'WEBAPP_HOST': env.web_host,
        'ENV': env.env_name,
        'DEPLOY_DIR': DEPLOY_DIR,
        'STATIC_DIR': WEBAPP_STATIC_DIR,
    }
    upload_template(template_dir=LOCAL_CONF_DIR,
                    filename='web_app.nginx.conf.j2',
                    destination=remote_sa_path,
                    context=context,
                    use_sudo=True,
                    use_jinja=True,
                    backup=False)
    sudo('ln -sf %s /etc/nginx/sites-enabled' % remote_sa_path)
    sudo('service nginx restart')

@task
def deploy_spa():
    require('current_host', 'hosts')
    build_spa()
    copy_spa()
    deploy_spa_nginx_config()

@task
def first_time_deploy():
    """
    Call this task when deploying for the first time
    """
    create_non_priveledged_user()
    prepare()
    config()
    deploy()

@task
def deploy():
    require('branch', 'user', 'hosts')
    deploy_files()
    config_crontab()
    install_req()
    deploy_static()
    update_static_chmod()
    clean_pyc()
    migrate()
    install_systemd_services()
...
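In this fabfile, LOCAL_CONF_DIR names a directory of local template files (postactivate, celery.sh, crontab, gunicorn.sh, nginx.conf.j2) that Fabric renders and pushes to the server with upload_template and put. A stripped-down sketch of that usage, assuming Fabric 1.x and a hypothetical conf/ directory containing app.conf.j2:

import os
from fabric.api import sudo, task
from fabric.contrib.files import upload_template

LOCAL_CONF_DIR = os.path.join(os.path.dirname(__file__), 'conf')  # hypothetical local template dir

@task
def push_config():
    # Render conf/app.conf.j2 with Jinja locally, then upload it to the server using sudo.
    upload_template(template_dir=LOCAL_CONF_DIR,
                    filename='app.conf.j2',
                    destination='/etc/myapp/app.conf',
                    context={'PORT': 8000},
                    use_jinja=True,
                    use_sudo=True)
    sudo('systemctl restart myapp')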


hbase-slider.py

Source: hbase-slider.py (GitHub)


#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes hbase shell after retrieving effective hbase-site.xml from a live Slider HBase cluster
First argument is the name of cluster instance
"""
import os
import subprocess
from os.path import expanduser
from os.path import exists
import glob
import getopt
import re
import fnmatch
import shutil
import logging
import socket
from string import Template
import time
import fileinput
import sys
import tempfile
import json
import datetime
from xml.dom import minidom
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
import urllib2
import hashlib
import random
import httplib, ssl

# find path to given command
def which(program):
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None

SLIDER_DIR = os.getenv('SLIDER_HOME', None)
if SLIDER_DIR == None or (not os.path.exists(SLIDER_DIR)):
    SLIDER_CMD = which("slider")
    if SLIDER_DIR == None:
        if os.path.exists("/usr/bin/slider"):
            SLIDER_CMD = "/usr/bin/slider"
        else:
            print "Unable to find SLIDER_HOME or slider command. Please configure SLIDER_HOME before running hbase-slider"
            sys.exit(1)
else:
    SLIDER_CMD = os.path.join(SLIDER_DIR, 'bin', 'slider.py')

HBASE_TMP_DIR = os.path.join(tempfile.gettempdir(), "hbase-temp")

# call slider command
def call(cmd):
    print "Running: " + " ".join(cmd)
    retcode = subprocess.call(cmd)
    if retcode != 0:
        raise Exception("return code from running %s was %d" % (cmd[0], retcode))

# Write text into a file
# wtext - Text to write
def writeToFile(wtext, outfile, isAppend=False):
    mode = 'w'
    if isAppend:
        mode = 'a+'
    outf = open(outfile, mode)
    try:
        outf.write(wtext)
    finally:
        outf.close()

# Update the XML configuration properties and write to another file
# infile - Input config XML file
# outfile - Output config XML file
# propertyMap - Properties to add/update
#               {'name1':'value1', 'name2':'value2',...}
def writePropertiesToConfigXMLFile(infile, outfile, propertyMap):
    xmldoc = minidom.parse(infile)
    cfgnode = xmldoc.getElementsByTagName("configuration")
    if len(cfgnode) == 0:
        raise Exception("Invalid Config XML file: " + infile)
    cfgnode = cfgnode[0]
    propertyMapKeys = propertyMap.keys()
    removeProp = []
    modified = []
    for node in xmldoc.getElementsByTagName("name"):
        name = node.childNodes[0].nodeValue.strip()
        if name in propertyMapKeys:
            modified.append(name)
            for vnode in node.parentNode.childNodes:
                if vnode.nodeName == "value":
                    if vnode.childNodes == []:
                        removeProp.append(name)
                        modified.remove(name)
                    else:
                        vnode.childNodes[0].nodeValue = propertyMap[name]
    remaining = list(set(propertyMapKeys) - set(modified))
    # delete properties whose value is set to None e.g. <value></value>
    for node in xmldoc.getElementsByTagName("name"):
        name = node.childNodes[0].nodeValue.strip()
        if name in removeProp:
            parent = node.parentNode
            super = parent.parentNode
            super.removeChild(parent)
    for property in remaining:
        pn = xmldoc.createElement("property")
        nn = xmldoc.createElement("name")
        ntn = xmldoc.createTextNode(property)
        nn.appendChild(ntn)
        pn.appendChild(nn)
        vn = xmldoc.createElement("value")
        vtn = xmldoc.createTextNode(str(propertyMap[property]))
        vn.appendChild(vtn)
        pn.appendChild(vn)
        cfgnode.appendChild(pn)
    writeToFile(xmldoc.toxml(), outfile)

def install(cluster_instance, dir):
    """Syntax: [hbase-slider cluster_instance install dir]
    Installs a fully configured hbase client in the specified dir
    The resulting client may be used on its own without hbase-slider
    """
    if os.path.exists(dir):
        raise Exception("Install dir must not exist: " + dir)
    workdir = os.path.join(tempfile.gettempdir(), 'install-work-dir')
    statusfile = os.path.join(workdir, 'status.json')
    cmd = [SLIDER_CMD, "status", cluster_instance, "--out", statusfile]
    call(cmd)
    infile = open(statusfile)
    try:
        content = json.load(infile)
    finally:
        infile.close()
    appdef = content['options']['application.def']
    appdeffile = appdef[appdef.rfind('/')+1:]
    cmd = ["hadoop", "fs", "-copyToLocal", appdef, workdir]
    call(cmd)
    cmd = ["unzip", os.path.join(workdir, appdeffile), "-d", workdir]
    call(cmd)
    gzfile = glob.glob(os.path.join(workdir, 'package', 'files', 'hbase*gz'))
    if len(gzfile) != 1:
        raise Exception("got " + gzfile + " from glob")
    cmd = ["tar", "xvzf", gzfile[0], '-C', workdir]
    call(cmd)
    tmp_hbase = glob.glob(os.path.join(workdir, 'hbase-[.0-9]*'))
    if len(tmp_hbase) != 1:
        raise Exception("got " + tmp_hbase + " from glob")
    tmp_hbase = tmp_hbase[0]
    confdir = os.path.join(tmp_hbase, 'conf')
    tmpHBaseConfFile = os.path.join(tempfile.gettempdir(), "hbase-site.xml")
    call([SLIDER_CMD, "registry", "--getconf", "hbase-site", "--user", "hbase", "--format", "xml", "--dest", tmpHBaseConfFile, "--name", cluster_instance])
    global HBASE_TMP_DIR
    propertyMap = {'hbase.tmp.dir' : HBASE_TMP_DIR, "instance" : cluster_instance}
    writePropertiesToConfigXMLFile(tmpHBaseConfFile, os.path.join(confdir, "hbase-site.xml"), propertyMap)
    libdir = os.path.join(tmp_hbase, 'lib')
    for jar in glob.glob(os.path.join(workdir, 'package', 'files', '*jar')):
        shutil.move(jar, libdir)
    shutil.move(tmp_hbase, dir)

def quicklinks(app_name):
    """Syntax: [hbase-slider appname quicklinks]
    Prints the quicklinks information of hbase-slider registry
    """
    cmd = [SLIDER_CMD, "registry", "--getconf", "quicklinks", "--format", "json",
           "--name", app_name]
    call(cmd)

home = expanduser("~")
if len(sys.argv) < 2:
    print "optionally you can specify the output directory for conf dir using:"
    print "  --appconf=<dir>"
    print "optionally you can specify the (existing) directory for hbase conf files (as template) using:"
    print "  --hbaseconf=<dir>"
    print "optionally you can specify the age in number of hours beyond which hbase-site.xml would be retrieved from slider cluster"
    print "  --ttl=<age of hbase-site.xml>"
    print "the name of cluster instance is required as the first parameter following options"
    print "the second parameter can be:"
    print "  shell (default) - activates hbase shell based on retrieved hbase-site.xml"
    print "  quicklinks - prints quicklinks from registry"
    print "  install <dir> - installs hbase client into <dir>"
    sys.exit(1)
try:
    opts, args = getopt.getopt(sys.argv[1:], "", ["appconf=", "hbaseconf=", "ttl="])
except getopt.GetoptError as err:
    # print help information and exit:
    print str(err)
    sys.exit(2)
cluster_instance = args[0]
local_conf_dir = os.path.join(home, cluster_instance, 'conf')
hbase_conf_dir = "/etc/hbase/conf"
ttl = 0
for o, a in opts:
    if o == "--appconf":
        local_conf_dir = a
    elif o == "--hbaseconf":
        hbase_conf_dir = a
    elif o == "--ttl":
        ttl = a
if len(args) > 1:
    if args[1] == 'quicklinks':
        quicklinks(cluster_instance)
        sys.exit(0)
    elif args[1] == 'install':
        install(cluster_instance, args[2])
        sys.exit(0)
needToRetrieve = True
HBaseConfFile = os.path.join(local_conf_dir, "hbase-site.xml")
if not exists(local_conf_dir):
    shutil.copytree(hbase_conf_dir, local_conf_dir)
else:
    shutil.copy2(os.path.join(hbase_conf_dir, "hbase-env.sh"), local_conf_dir)
    if exists(HBaseConfFile):
        diff = os.path.getmtime(HBaseConfFile) - int(time.time())
        diff = diff / 60 / 60
        print HBaseConfFile + " is " + str(diff) + " hours old"
        if diff < ttl:
            needToRetrieve = False
if needToRetrieve:
    tmpHBaseConfFile = os.path.join(tempfile.gettempdir(), "hbase-site.xml")
    call([SLIDER_CMD, "registry", "--getconf", "hbase-site", "--user", "hbase", "--format", "xml", "--dest", tmpHBaseConfFile, "--name", cluster_instance])
    propertyMap = {'hbase.tmp.dir' : HBASE_TMP_DIR, "instance" : cluster_instance}
    writePropertiesToConfigXMLFile(tmpHBaseConfFile, HBaseConfFile, propertyMap)
    print "hbase configuration is saved in " + HBaseConfFile
...
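Here local_conf_dir defaults to ~/<cluster_instance>/conf (overridable with --appconf), is seeded by copying /etc/hbase/conf, and is refreshed from the Slider registry once hbase-site.xml is older than the --ttl threshold. A condensed, Python 3 sketch of just that seed-or-refresh decision, with example paths and the file age computed as now minus mtime:

import os
import shutil
import time

local_conf_dir = os.path.join(os.path.expanduser('~'), 'my_cluster', 'conf')  # example path
hbase_conf_dir = '/etc/hbase/conf'
ttl_hours = 24

site_xml = os.path.join(local_conf_dir, 'hbase-site.xml')
if not os.path.exists(local_conf_dir):
    # First run: seed the local conf dir from the system-wide template directory.
    shutil.copytree(hbase_conf_dir, local_conf_dir)
if os.path.exists(site_xml):
    age_hours = (time.time() - os.path.getmtime(site_xml)) / 3600
else:
    age_hours = float('inf')
if age_hours > ttl_hours:
    print('hbase-site.xml is stale; this is where it would be re-fetched from the registry')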


test_monteliblobber.py

Source: test_monteliblobber.py (GitHub)


import unittest
import ipaddress
from flask import json
from Monteliblobber import monteliblobber
from Monteliblobber.settings import Config
import os

# Create config object.
c = Config()
TEST_ROOT = os.path.dirname(os.path.abspath(__file__))

# Acquire test blob data
with open(os.path.join(TEST_ROOT, 'email_message_source.txt'), 'r') as f:
    TEST_BLOB = f.read()

class MonteliblobberTestCase(unittest.TestCase):
    def setUp(self):
        self.config = Config()
        self.preflight = monteliblobber.preflight_check
        monteliblobber.app.config['TESTING'] = True
        # Add additional white list entries
        self.config.WHITELISTS['domains'].extend(['apple.com', 'email.com'])
        # Add additional named networks
        self.config.NAMED_NETWORKS.update({'WATCH': [ipaddress.ip_network('87.236.220.0/24')]})
        self.ips = ['72.167.218.149', '87.236.220.167']
        self.tags = ['United States', 'Spain', 'WATCH']
        self.urls = ['http://yvonneevans.net/wp-content/plugins/the-events-calendar/resources/tumour.php']
        self.hostnames = [
            '87-236-220-167.factoriadigital.com',
            'jantje.com',
            'p3plibsmtp03-02.prod.phx3.secureserver.net',
            'p3plsmtp03-01.prod.phx3.secureserver.net'
        ]
        self.addresses = ['jantje@jantje.com']
        self.app = monteliblobber.app.test_client()

    def tearDown(self):
        pass

    def test_extract_ip_addresses(self):
        """ IP Address are extracted and tagged correctly.
        """
        data = monteliblobber.get_network_addresses(
            TEST_BLOB,
            self.config.MAXMIND_CITY_DB_PATH,
            self.config.BLACKLIST_DB,
            self.config.NAMED_NETWORKS,
            self.config.WHITELISTS['network_addresses']
        )
        for record in data:
            with self.subTest(record['value']):
                self.assertIn(record['value'], self.ips)
            with self.subTest(record['tags']):
                for tag in record['tags']:
                    self.assertIn(tag, self.tags)

    def test_extract_urls(self):
        """ One URL is extracted. URLs with a white listed domain are not present.
        """
        data = monteliblobber.get_urls(TEST_BLOB, self.config.WHITELISTS['domains'])
        for record in data:
            with self.subTest(record['value']):
                self.assertIn(record['value'], self.urls)

    def test_extract_hostnames(self):
        """ Hostnames are extracted, except those with a white listed domain.
        """
        data = monteliblobber.get_hostnames(
            TEST_BLOB,
            self.config.ROOT_DOMAINS_PATH,
            self.config.WHITELISTS['domains']
        )
        for record in data:
            with self.subTest(record['value']):
                self.assertIn(record['value'], self.hostnames)

    def test_extract_email_addresses(self):
        """ Email addresses are extracted, except those with a white listed domain.
        """
        data = monteliblobber.get_email_addresses(TEST_BLOB, self.config.WHITELISTS['domains'])
        for record in data:
            with self.subTest(record['value']):
                self.assertIn(record['value'], self.addresses)

    def test_integration(self):
        """ All artifacts are extracted through the web service.
        """
        rv = self.app.post('/', data={'blob': TEST_BLOB})
        data = json.loads(rv.data)['data']
        for record in data:
            with self.subTest(record['value']):
                if record['data_type'] == 'ipv4_address':
                    self.assertIn(record['value'], self.ips)
            with self.subTest(record['value']):
                if record['data_type'] == 'url':
                    self.assertIn(record['value'], self.urls)
            with self.subTest(record['value']):
                if record['data_type'] == 'dns_name':
                    self.assertIn(record['value'], self.hostnames)
            with self.subTest(record['value']):
                if record['data_type'] == 'email':
                    self.assertIn(record['value'], self.addresses)
            with self.subTest(record['tags']):
                if len(str(record['tags'])) > 0:
                    for tag in record['tags']:
                        self.assertIn(tag, self.tags)

    def test_lookup_files_exist(self):
        result = self.preflight(
            self.config.BLACKLIST_DB,
            self.config.MAXMIND_CITY_DB_PATH,
            self.config.ROOT_DOMAINS_PATH
        )
        assert result

def setUpModule():
    # If local config directory exists, ensure empty, else create the directory.
    if os.path.isdir(c.LOCAL_CONF_DIR):
        files = os.listdir(c.LOCAL_CONF_DIR)
        for file in files:
            os.remove(os.path.join(c.LOCAL_CONF_DIR, file))
    else:
        os.mkdir(c.LOCAL_CONF_DIR)
    # Download fresh static files.
    monteliblobber.get_root_domains(c.ROOT_DOMAINS_URL, c.ROOT_DOMAINS_PATH)
    monteliblobber.get_geoip_database(c.GEOIP_DB_URL, c.MAXMIND_CITY_DB_PATH)
    monteliblobber.get_blacklists(c.BLACKLISTS, c.BLACKLIST_DB)

def tearDownModule():
    # Clean up the local config directory and files.
    files = os.listdir(c.LOCAL_CONF_DIR)
    for file in files:
        os.remove(os.path.join(c.LOCAL_CONF_DIR, file))
    os.removedirs(c.LOCAL_CONF_DIR)

if __name__ == '__main__':
...
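In this test module, LOCAL_CONF_DIR is emptied (or created) in setUpModule and deleted again in tearDownModule so every run starts from a clean configuration directory. A minimal sketch of the same fixture idea using only the standard library (the directory name here is an example, not part of Monteliblobber):

import os
import tempfile
import unittest

EXAMPLE_LOCAL_CONF_DIR = os.path.join(tempfile.gettempdir(), 'example_local_conf')

class LocalConfDirFixtureTest(unittest.TestCase):
    def setUp(self):
        # Start every test with an empty local conf directory.
        os.makedirs(EXAMPLE_LOCAL_CONF_DIR, exist_ok=True)
        for name in os.listdir(EXAMPLE_LOCAL_CONF_DIR):
            os.remove(os.path.join(EXAMPLE_LOCAL_CONF_DIR, name))

    def tearDown(self):
        # Remove the generated files and the directory itself.
        for name in os.listdir(EXAMPLE_LOCAL_CONF_DIR):
            os.remove(os.path.join(EXAMPLE_LOCAL_CONF_DIR, name))
        os.rmdir(EXAMPLE_LOCAL_CONF_DIR)

    def test_conf_file_roundtrip(self):
        path = os.path.join(EXAMPLE_LOCAL_CONF_DIR, 'settings.ini')
        with open(path, 'w') as f:
            f.write('[main]\nkey = value\n')
        self.assertTrue(os.path.isfile(path))

if __name__ == '__main__':
    unittest.main()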


addontool.py

Source: addontool.py (GitHub)


import logging
import logging.handlers
import os
import sys
import argparse
import shutil

script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, 'lib', 'buildtools'))
from buildtools import os_utils
from buildtools.os_utils import cmd, Chdir

base_conf_dir = os.path.join(script_dir, 'conf.d')
local_conf_dir = os.path.join(os.getcwd(), 'conf.d')
base_addon_dir = os.path.join(base_conf_dir, 'addons')
local_addon_dir = os.path.join(local_conf_dir, 'addons')
base_template_dir = os.path.join(script_dir, 'conf.template')

def handle_enable(args):
    for module in args.module:
        if not enable_addon(module):
            break

def handle_disable(args):
    for module in args.module:
        if not disable_addon(module):
            break

def enable_addon(addon):
    addon = addon.split('.')[0]
    link = os.path.join(local_addon_dir, addon + '.yml')
    filename = os.path.join(base_addon_dir, addon + '.yml.disabled')
    if not os.path.isfile(filename):
        log.error("Addon %r doesn't exist.", addon)
        return False
    if os.path.islink(link):
        log.warn("Addon %r is already enabled.", addon)
        return True
    os_utils.ensureDirExists(os.path.dirname(link), mode=0o755)
    if cmd(['ln', '-sf', filename, link], show_output=False, critical=True):
        log.info('Addon %r enabled.', addon)
        return True

def disable_addon(addon):
    addon = addon.split('.')[0]
    link = os.path.join(local_addon_dir, addon + '.yml')
    filename = os.path.join(base_addon_dir, addon + '.yml.disabled')
    if not os.path.isfile(filename):
        log.error("Addon %r doesn't exist.", addon)
        return True
    if not os.path.islink(link):
        log.warn("Addon %r is already disabled.", addon)
        return False
    os.remove(link)
    log.info('OK: Addon %r disabled.', addon)
    return False

def cp(src, dest):
    if not os.path.isfile(dest):
        log.info('cp -p "%s" "%s"', src, dest)
        shutil.copy2(src, dest)

def handle_install(args):
    os_utils.ensureDirExists(args.directory, mode=0o700, noisy=True)
    with Chdir(args.directory):
        os_utils.ensureDirExists('conf.d', mode=0o700, noisy=True)
        os_utils.ensureDirExists('conf.d/addons', mode=0o700, noisy=True)
        os_utils.ensureDirExists('cache', mode=0o700, noisy=True)
        cp(os.path.join(base_conf_dir, 'fastdl.yml.disabled'), os.path.join(local_conf_dir, 'fastdl.yml.example'))
        if args.addon:
            for addon in args.addon:
                enable_addon(addon)
        log.info('Writing launch.sh...')
        with open('launch.sh', 'w') as f:
            f.write('#!/bin/bash\n')
            f.write('cd "{cwd}"\n'.format(cwd=os.path.realpath(args.directory)))
            f.write('python "{script}" $@\n'.format(script=os.path.join(script_dir, 'Watchdog.py')))
        log.info('chmod 700 launch.sh')
        os.chmod('launch.sh', 0o700)
        cp(os.path.join(script_dir, 'conf.templates', args.template), os.path.join(os.getcwd(), 'watchdog.yml'))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configure addon modules.')
    subcmds = parser.add_subparsers()
    enable_cmd = subcmds.add_parser('enable', help='Enable addons in conf.d/addons/.')
    enable_cmd.add_argument('module', nargs='+', help="Module filename, relative to conf.d/addons, with no file extensions. (e.g. garrysmod/ulx)")
    enable_cmd.set_defaults(func=handle_enable)
    disable_cmd = subcmds.add_parser('disable', help='Disable addons in conf.d/addons/.')
    disable_cmd.add_argument('module', nargs='+', help="Module filename, relative to conf.d/addons, with no file extensions. (e.g. garrysmod/ulx)")
    disable_cmd.set_defaults(func=handle_disable)
    install_cmd = subcmds.add_parser('install', help="Create a run environment in a given directory.")
    install_cmd.add_argument('template', type=str, help="Which conf.template do you want to use for watchdog.yml?")
    install_cmd.add_argument('directory', type=str, help="Where to create the run directory? (Cannot be the watchdog script directory).")
    install_cmd.add_argument('-a', '--addon', nargs='*', type=str, help="Enable an addon by the given name. (e.g. garrysmod/ulx)")
    install_cmd.set_defaults(func=handle_install)
    logFormatter = logging.Formatter(fmt='%(asctime)s [%(levelname)-8s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')  # , level=logging.INFO, filename='crashlog.log', filemode='a+'
    log = logging.getLogger()
    _args = parser.parse_args()
    # print(repr(_args))
...
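addontool.py keeps a read-only base_conf_dir next to the script and a writable local_conf_dir in the current working directory; enabling an addon links a .yml.disabled file from the base tree into conf.d/addons under the local tree. A simplified sketch of that layout using os.symlink directly (the buildtools helpers are not needed for the illustration):

import os

base_conf_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf.d')
local_conf_dir = os.path.join(os.getcwd(), 'conf.d')

def enable(addon):
    # Link conf.d/addons/<addon>.yml.disabled (base) to conf.d/addons/<addon>.yml (local).
    source = os.path.join(base_conf_dir, 'addons', addon + '.yml.disabled')
    link = os.path.join(local_conf_dir, 'addons', addon + '.yml')
    if not os.path.isfile(source):
        raise FileNotFoundError(source)
    os.makedirs(os.path.dirname(link), exist_ok=True)
    if not os.path.islink(link):
        os.symlink(source, link)
    return link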


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
