How to use relative_symlinks method in Slash

Best Python code snippet using slash

munging.py

Source:munging.py Github

copy

Full Screen

1# coding=utf-82"""3Munge original data together to a more cohesive and uniform shape.4You probably want to run bootstrap_braincode_dir (at the end of this file).5This filters and puts together data for our two datasets (CB1 and T1):6 - in the strawlab central repository (images, templates and segmentations),7 - in Florian's HDF5 files (to find which images go into which dataset, CB1 or T1),8 - in Florian's CSV database dumps (linking images to driver lines).9All these also keep the original brainbase database ids for provenance.10"""11from __future__ import print_function, division12import glob13import sys14import os15import os.path as op16import shutil17from collections import namedtuple18from functools import partial19from itertools import product20from textwrap import dedent21import h5py22import nrrd23import numpy as np24import pandas as pd25from py_amira_file_reader.read_amira import read_amira26from braincode.revisions.config import braincode_dir, BRAINCODE_PACKAGE_DIR, STRAWLAB_ROOT27from braincode.revisions.images import VoxelLabels28# We probably should manage to remove these two imports.29# So ultimately we do not depend on Florian's HDF5s and CSVs.30from braincode.CAUTION.copy_florians_data import my_read_csv31from braincode.util import ExpressionDataset, get_all_datasets, get_all_neuropils, ensure_dir32# --- Original files and configurations33# Where original data and brainbase database query dumps are34# Downsample sizes From [T1|CB1]/.../Antennal_lobe_clusterimage.am35# At the moment the HDF5 files are read using braincode.util36_Originals = namedtuple('Originals', ['template',37 'amiras',38 'test_amira',39 'regions',40 'downsampled_size'])41_T1_ORIGINALS = _Originals(42 template=op.join(STRAWLAB_ROOT, 'brainwarp', 'T1V2template', 'T1.am'),43 amiras=op.join(STRAWLAB_ROOT, 'bbweb', 'internal', 'Data', 'channelimages'),44 test_amira='TP10100501L33Sum01.am',45 regions=op.join(STRAWLAB_ROOT, 'Laszlo', 'T1_Ito_new'),46 downsampled_size=(154, 154, 41))47_CB1_ORIGINALS = _Originals(48 template=op.join(STRAWLAB_ROOT, 'brainwarp', 'CB1_template', 'latest',49 'Merged_highcontrast_BCimpr_final_notcompressed.am'),50 amiras=op.join(STRAWLAB_ROOT, 'bbweb', 'internal', 'Data', 'channelimages'),51 test_amira='CB1_GMR_9C12_AE_01_29-fA01b_C100120_20100120125741015_02_warp_m0g80c8e1e-1x26r301.am',52 regions=op.join(STRAWLAB_ROOT, 'Laszlo', 'CB1_Ito', 'ItoCb1Amira'),53 downsampled_size=(341, 171, 73))54_ID2NAME_CSV = op.join(op.dirname(BRAINCODE_PACKAGE_DIR), 'images_id_to_file.csv')55_NAME2DRIVER_CSV = op.join(op.dirname(BRAINCODE_PACKAGE_DIR), 'CAUTION', 'names_drivers.csv')56def dataset2human(dataset):57 return {'CB1': 'Janelia FlyLight', 'T1': 'Vienna Tiles'}[dataset]58# --- Original segmentations of the brains into "neuropils"59# Format is not uniform between datasets, these functions take care of it.60# The annotations on the CB1 dataset were inferred by "warping" these in the T1 dataset.61# Florian knows the details.62_NEUROPILS = {63 # N.B. capitals to match supplemental dir names64 # When relevant, we did the original clusterings in the right hemisphere65 'SEZ': ['GNG', 'PRW', 'SAD', 'AMMC_L', 'AMMC_R'],66 'Mushroom_Body_L': ['MB_CA_L', 'MB_ML_L', 'MB_PED_L', 'MB_VL_L'],67 'Mushroom_Body_R': ['MB_CA_R', 'MB_ML_R', 'MB_PED_R', 'MB_VL_R'],68 'Mushroom_Body': ['MB_CA_L', 'MB_ML_L', 'MB_PED_L', 'MB_VL_L',69 'MB_CA_R', 'MB_ML_R', 'MB_PED_R', 'MB_VL_R'],70 'Antennal_lobe_L': ['AL_L'],71 'Antennal_lobe_R': ['AL_R'],72 'Antennal_lobe': ['AL_L', 'AL_R'],73 'Optic_Glomeruli_L': ['AOTU_L', 'PVLP_L', 'PLP_L'],74 'Optic_Glomeruli_R': ['AOTU_R', 'PVLP_R', 'PLP_R'],75 'Optic_Glomeruli': ['AOTU_L', 'PVLP_L', 'PLP_L',76 'AOTU_R', 'PVLP_R', 'PLP_R'],77 'Central_Complex': ['PB', 'EB', 'FB', 'NO']78}79def neuropil2human(neuropil):80 # The original in braincode-web/util.js81 prefix = 'Left ' if neuropil.endswith('L') else 'Right ' if neuropil.endswith('R') else ''82 n2h = {83 'Optic_Glomeruli': 'Optic Glomeruli (oVLNP)',84 'Mushroom_Body': 'Mushroom Body (MB)',85 'Central_Complex': 'Central Complex (CX)',86 'Antennal_lobe': 'Antennal Lobe (AL)',87 'SEZ': 'Subesophageal Zone (SEZ)',88 }89 for np_name, human_name in n2h.items():90 if neuropil.startswith(np_name):91 return prefix + human_name92 raise ValueError('Unknown region "%s"' % neuropil)93_REGIONNAME2ID_EXPECTATIONS = (94 ('GNG', 39),95 ('PRW', 40),96 ('SAD', 8),97 ('AMMC_L', 48),98 ('AMMC_R', 10),99 ('LO_L', 43),100 ('LO_R', 2),101 ('LOP_L', 58),102 ('LOP_R', 20),103 ('AOTU_L', 68),104 ('AOTU_R', 32),105 ('ME_L', 60),106 ('EB', 21),107)108def _parse_brain_region_descriptions(regions_path):109 # Read the region description110 # This has short descriptions for some acronyms111 regionid2description = {}112 with open(regions_path) as reader:113 text = reader.read().strip()114 for line in text.splitlines():115 region_id, _, region = line.partition(' ')116 region = region.strip()117 regionid2description[region_id] = region118 # Extend to Mushroom Body subregions119 regionid2description['MB_CA'] = \120 regionid2description['MB_ML'] = \121 regionid2description['MB_PED'] = \122 regionid2description['MB_VL'] = regionid2description['MB']123 return regionid2description124def check_region2id(name, expectation, region2id):125 if region2id[name] != expectation:126 raise Exception('Expected ID %d for regions %s, got %d' %127 (expectation, name, region2id[name]))128def _generate_regions_nrrds(dataset, regions, sanity_checks_dir=None):129 """130 This function generates nrrd files to check by experts:131 - One with with all the region assignments.132 - One per neuropil133 These images go out of the release directory.134 """135 print('\tWriting regions nrrds for dataset %s' % dataset)136 if sanity_checks_dir is None:137 sanity_checks_dir = ensure_dir(op.abspath(op.join(braincode_dir(),138 '..',139 'regions-sanity')))140 # Write nrrd with all the assignments141 print('\t\tAll regions...')142 nrrd.write(op.join(sanity_checks_dir, '%s-regions.nrrd' % dataset),143 regions.voxel2labelid)144 # Write a nrrd per neuropil145 for neuropil in regions.labels() + regions.label_group_names():146 print('\t\t%s...' % neuropil)147 mask = regions.mask(neuropil)148 image = np.zeros_like(mask, dtype=np.uint8)149 image[~mask] = 255150 nrrd.write(op.join(sanity_checks_dir, '%s-%s.nrrd' % (dataset, neuropil)), image)151def _t1_regions_munger():152 amira_path = op.expanduser(op.join(_T1_ORIGINALS.regions,153 'composition_s2_t128_laszlo_AL_R_andL.am'))154 # Read the region description155 regions_path = op.join(op.dirname(amira_path),156 'DrosophilaBrainRegions.terms.txt')157 region2description = _parse_brain_region_descriptions(regions_path)158 # Read the amira data file for voxel assignments and regions mappings159 amira_data = read_amira(amira_path)160 assert amira_data['info'] == {'version': '2.0', 'type': 'AmiraMesh', 'is_binary': True}161 data = amira_data['data']162 # Declared shape163 shape = tuple(data[0]['define']['Lattice'])164 assert shape == (768, 768, 165)165 parameters = data[1]['Parameters']166 # Region2id. Here 1 = exterior, 0 = unselected, anything higher means a named selected region (neuropil)167 region2id = {material: mid['Id'] - 1 for material, mid in parameters['Materials'].items()}168 # Sanity checks - Andrew provided these expectations after looking at the actual image169 for name, expected_id in _REGIONNAME2ID_EXPECTATIONS:170 check_region2id(name, expected_id, region2id)171 # We also have BoundingBox, CoordType, Content and Seeds->Slices172 # Plus data types declarations and data for two tensors:173 # - data: the labels174 # - probabilities: byte type, probably we do not need this175 # voxel2regionid176 voxel2regionid = data[4]['data'] # Like 768x768x165 array of codes (aka "labels")177 assert voxel2regionid.shape == shape178 # generate a table label -> id179 def LR_region2description(region):180 prefix = 'left ' if region.endswith('_L') else 'right ' if region.endswith('_R') else ''181 key = region if prefix == '' else region[:-2]182 return prefix + region2description.get(key, key)183 regions = [(region, region_id, LR_region2description(region))184 for region, region_id in sorted(region2id.items())]185 regions_df = pd.DataFrame(regions, columns=['name', 'id', 'description'])186 return VoxelLabels(regions_df, voxel2regionid, label_groups=_NEUROPILS)187def _cb1_regions_munger(clean_spurious_voxels=True):188 path = op.expanduser(_CB1_ORIGINALS.regions)189 # Read the region description190 region2description = _parse_brain_region_descriptions(op.join(path, 'DrosophilaBrainRegions.terms.txt'))191 # Read the map region_name -> label192 # Note, this can be found also in the .surf file, which seems the original193 # So maybe we should use that file instead194 params = op.join(path, 'DrosophilaBrainRegions.params.txt')195 def parse_line(line):196 region_name, _, region_id, _, r, g, b = line.strip().split()197 region_id = int(region_id)198 region_color = (float(r), float(g), float(b))199 return region_name, region_id, region_color200 with open(params, 'r') as reader:201 parsed_params = [parse_line(line) for line in reader if 0 < len(line.strip())]202 region2id = {region_name: region_id - 1 for region_name, region_id, _ in parsed_params}203 # N.B. we probably want to keep colors so we are consistent if we ever generate images204 # Sanity checks - Andrew provided these expectations after looking at the actual image205 for name, expected_id in _REGIONNAME2ID_EXPECTATIONS:206 check_region2id(name, expected_id, region2id)207 # Read the masks208 def fn2label(amira):209 return int(op.basename(amira)[len('NeuropilMask'):].split('.')[0])210 amiras = glob.glob(op.join(path, 'NeuropilMask*.am'))211 label2amira = {fn2label(amira) + 1: amira for amira in amiras}212 # N.B. +1 for making it 1-based, as in the parameters file; it seems that the files go that name213 # All this is too speculative, talk to K/L/F214 # Combine all these into one "labels" image215 voxel2region = None216 for region_id, amira in sorted(label2amira.items()):217 if voxel2region is None:218 voxel2region = read_amira(amira)['data'][3]['data']219 voxel2region[voxel2region == 1] = region_id220 else:221 data = read_amira(amira)['data'][3]['data']222 region_voxels = data == 1223 if ((voxel2region != 0) & region_voxels).any():224 raise Exception('Overlapping regions are not supported at the moment')225 voxel2region[region_voxels] = region_id226 voxel2regionid = voxel2region227 # generate a table label -> id228 def LR_region2description(region):229 prefix = 'left ' if region.endswith('_L') else 'right ' if region.endswith('_R') else ''230 key = region if prefix == '' else region[:-2]231 return prefix + region2description.get(key, key)232 regions = [(region, region_id, LR_region2description(region))233 for region, region_id in sorted(region2id.items())]234 regions_df = pd.DataFrame(regions, columns=['name', 'id', 'description'])235 # clean these pesky spurious voxels236 def remove_spurious_voxels_on_the_other_side(voxel2regionid, regions_df):237 """238 Clean spurious voxels on the other side in L/R segmentations.239 This happens apparently only in the right regions of CB1;240 I have just checked it in "AL_R", "Mushroom_Body_R" and "Optic_Glomeruli_R".241 I will assume it might happens in all the left/right regions.242 Note: this can only work in the CB1 dataset (which is centered).243 """244 # midx = int(np.round(voxel2regionid.shape[0] / 2))245 midx = 510 # Inferred as the mean x in segments joining the mean coordinates of L,R pairs in the dataset246 in_right = np.zeros_like(voxel2regionid, dtype=np.bool)247 in_right[midx:] = True # Remember that left/right is from "fly perspective"248 # Probably we should just compute neuropil center, a histogram of distances and remove outliers249 # Or use properly fitted reflection planes250 for name, region_id in zip(regions_df.name, regions_df.id):251 if name.endswith('_L'):252 in_region = voxel2regionid == region_id253 spurious = in_region & ~in_right254 print('\t\t\t%s removed %d of %d (%.4f%%) voxels in the other side' %255 (name, spurious.sum(), in_region.sum(), 100 * spurious.sum() / in_region.sum()))256 voxel2regionid[spurious] = 0257 if name.endswith('_R'):258 in_region = voxel2regionid == region_id259 spurious = in_region & in_right260 print('\t\t\t%s removed %d of %d (%.4f%%) voxels in the other side' %261 (name, spurious.sum(), in_region.sum(), 100 * spurious.sum() / in_region.sum()))262 voxel2regionid[spurious] = 0263 return voxel2regionid264 if clean_spurious_voxels:265 print('\t\tWARNING: removing spurious voxels in the other side...')266 print('\t\tTHIS IS UNTESTED AND PROBABLY BROKEN AT THE MOMENT')267 voxel2regionid = remove_spurious_voxels_on_the_other_side(voxel2regionid, regions_df)268 return VoxelLabels(regions_df, voxel2regionid, label_groups=_NEUROPILS)269# dest_dir = ensure_dir(op.expanduser('~/clean-vs-not'))270# vl = _cb1_regions_munger(clean_spurious_voxels=False)271# vl.to_hdf5(op.join(dest_dir, 'original.h5'))272# _generate_regions_nrrds('CB1', vl, sanity_checks_dir=ensure_dir(op.join(dest_dir, 'original')))273# vl = _cb1_regions_munger(clean_spurious_voxels=True)274# vl.to_hdf5(op.join(dest_dir, 'clean.h5'))275# _generate_regions_nrrds('CB1', vl, sanity_checks_dir=ensure_dir(op.join(dest_dir, 'clean')))276# exit(22)277# --- Templates278def _read_template(amira_path):279 # Read280 amira_data = read_amira(amira_path)281 data = amira_data['data']282 shape = tuple(data[0]['define']['Lattice'])283 coord_type = data[1]['Parameters']['CoordType'][1:-1]284 bounding_box = np.array(data[1]['Parameters']['BoundingBox'])285 template = data[3]['data']286 # Dumbchecking stuff287 assert amira_data['info'] == {'version': '2.0', 'type': 'AmiraMesh', 'is_binary': True}288 assert coord_type == 'uniform'289 assert data[2]['Lattice']['byte'] in {'ScalarField', 'Data'}290 assert template.shape == shape291 return template, bounding_box292# --- Data release logic293def bootstrap_braincode_dir(id2name_csv=_ID2NAME_CSV,294 name2driver_csv=_NAME2DRIVER_CSV,295 bootstrap_dir=None,296 reset=False,297 ignore_originals=False,298 clean_spurious_voxels=False,299 generate_nrdds=False,300 relative_symlinks=True,301 log_duplicate_paths=False):302 """303 This function bootstraps "data release" directories for each of the datasets in our analysis.304 Ideally it should directly query the databases.305 At the moment, we use the files provided by Florian to bootstrap these simple, standalone data repositories.306 It generates the following hierarchy and files:307 bootstrap_dir308 |--T1 # dataset309 |--image2driver.csv # "master table" of dataset contents;310 | # it links original db_id with file names and driver lines311 |--image2driver.csv.description # description of the "master table"312 |--template_regions.h5 # the template image and its segmentation in neuropils are here313 |--template_regions.h5.description # description314 |--original # where the original images are symlinked315 |--images # images will be stored316 |--TP10100501L33Sum02.am # files with such names317 |--...318 |--template.am # the template image is here319 |--regions # region information for the image (e.g. neuropils)320 """321 # --- Database IDs for the images in our analysis322 # These are needed to later link to actual filenames, and useful to keep provenance.323 # Unfortunately this is backwards from Florian's hdf5 files ATM324 # We do not assume all lines are in all neuropils, so iterate all regions.325 # Maybe these are not all IDs we need; the way to go should be to run the query ourselves.326 db_ids = []327 for expression_dset, neuropil in product(get_all_datasets(), get_all_neuropils()):328 dset = ExpressionDataset.dataset(dset=expression_dset, neuropil=neuropil)329 db_ids.extend(zip(dset.lines(), [expression_dset] * len(dset.lines())))330 dbids_df = pd.DataFrame(data=sorted(set(db_ids)),331 columns=['db_id', 'dataset'])332 dbids_df = dbids_df.set_index('db_id')333 # --- db_id -> amira file_name334 # On a second CSV, we are given the link from a db_id to the amira file name335 # Probably all these could be done with a simple database query336 # Columns are ['id', 'file_path', 'previews']337 id2file_df = pd.read_csv(id2name_csv, sep=';').sort_values('file_path')338 # Check id uniqueness339 assert len(id2file_df.id) == id2file_df.id.nunique()340 # Rename id -> db_id; file_path -> file_name341 id2file_df = id2file_df.rename(columns=lambda col: {'id': 'db_id', 'file_path': 'file_name'}.get(col, col))342 # Set db_id as the index343 id2file_df = id2file_df.set_index('db_id')344 # Files seems duplicated in the database345 # Fortunately, each file is only once in our analysis, so not a big deal346 if log_duplicate_paths:347 duplicated_paths = id2file_df[id2file_df.file_name.duplicated()].file_name.unique()348 print('Images in more than one entry:\n%s' % '\n'.join(duplicated_paths))349 # Merge350 image2driver_df = pd.merge(id2file_df, dbids_df, left_index=True, right_index=True, how='inner')351 assert len(image2driver_df) == 9484352 assert image2driver_df.file_name.nunique() == 9484353 # --- file_name -> driver line354 # On a third text file, a weirdo DB dump, we link name to driver line and channel355 # Andrew deals with it in CAUTION/copy_florians_data.py:my_read_csv356 # This funky function gives a dictionary like this:357 # {name: {channel: (driver, line number in the CSV)}358 # We will just add name, channel and driver to our dataframe359 n2d = my_read_csv(name2driver_csv, verbose=False)360 # "name" can be inferred from the file_name easily (e.g. TP10100501L33Sum02.am -> TP10100501L33Sum)361 image2driver_df['name'] = image2driver_df.file_name.apply(lambda fp: op.splitext(fp)[0][:-2])362 # Add channel and driver to our dataframe, checking for uniqueness and ambiguities363 def name2driver(name):364 try:365 channel2driver = n2d[name]366 if len(channel2driver) == 0:367 raise Exception('No driver line found for name %s' % name)368 if len(channel2driver) > 1:369 raise Exception('Multiple channels/driver lines found for name %s' % name)370 channel, (driver, _) = channel2driver.popitem()371 return pd.Series({'driver': driver, 'channel': channel})372 except KeyError:373 raise Exception('Cannot find driver line for name %s' % name)374 # add name and driver as dataframe columns375 image2driver_df = image2driver_df.merge(image2driver_df.name.apply(name2driver), left_index=True, right_index=True)376 # make db_id a column377 image2driver_df = image2driver_df.reset_index()378 # reorder, drop name and previews379 image2driver_df = image2driver_df[['db_id', 'dataset', 'file_name', 'channel', 'driver']]380 image2driver_description = dedent("""381 The image2driver.csv file contains a table with an image per row and the following columns:382 - db_id: The ID of the image in the in-house database, for provenance tracking.383 - dataset: The dataset this image pertains to (e.g. T1 (Total Brain 1) or CB1 (Central Brain 1)).384 - file_name: The name of the amira file containing the image.385 - channel: The name of the channel in the image, usually indicating what was expressed (e.g. antiGFP).386 - driver: The driver line ID from flybase.387 """).strip()388 # --- Do release on a per-dataset basis389 bootstrap_dir = braincode_dir(bootstrap_dir, create=False)390 ensure_dir(bootstrap_dir)391 print('Bootstrapping braincode data in directory %s' % op.abspath(bootstrap_dir))392 for dataset, dataset_df in image2driver_df.groupby('dataset'):393 print('Releasing data for dataset %s' % dataset)394 dataset_dir = ensure_dir(op.join(bootstrap_dir, dataset))395 template_regions_h5 = op.join(dataset_dir, 'template_regions.h5')396 # Targeted cleanup397 if reset:398 print('\tRemoving directories and files, please wait...')399 shutil.rmtree(op.join(dataset_dir, 'originals'), ignore_errors=True)400 for fn in ['image2driver.csv', 'image2driver.csv.description',401 'template_regions.h5', 'template_regions.h5.description']:402 try:403 os.remove(op.join(dataset_dir, fn))404 except OSError:405 pass406 # Save the image2driver csv file407 print('\tSaving image2driver.csv')408 image2driver_csv = op.join(dataset_dir, 'image2driver.csv')409 dataset_df.to_csv(image2driver_csv, index=False)410 dataset_df = dataset_df.reset_index(drop=True)411 roundtripped = pd.read_csv(image2driver_csv, index_col=False, dtype={'channel': object})412 assert dataset_df.reset_index(drop=True).equals(roundtripped)413 with open(image2driver_csv + '.description', 'w') as writer:414 writer.write(image2driver_description)415 # Get the location of the original files in strawscience416 originals = _T1_ORIGINALS if dataset == 'T1' else _CB1_ORIGINALS417 # Symlink image files418 if not ignore_originals:419 print('\tSymlinking original amiras')420 original_dest = ensure_dir(op.join(dataset_dir, 'originals', 'images'))421 for file_name in dataset_df.file_name:422 original = op.expanduser(op.join(originals.amiras, file_name))423 if not op.isfile(original):424 print('WARNING: Cannot find image %s' % original)425 continue426 dest = op.join(original_dest, file_name)427 if relative_symlinks:428 original = op.relpath(original, original_dest)429 try:430 os.symlink(original, dest)431 except OSError as err:432 print('failing on symlink dest %r' % dest, file=sys.stderr)433 raise434 # Symlink neuropil files435 if not ignore_originals:436 print('\tSymlinking anatomical regions directory')437 original = op.expanduser(originals.regions)438 if not op.isdir(original):439 print('WARNING: Cannot find regions %s' % original)440 original_dest = op.join(dataset_dir, 'originals', 'regions')441 if relative_symlinks:442 original = op.relpath(original, op.dirname(original_dest))443 os.symlink(original, original_dest)444 # Read regions and save to HDF5445 print('\tMunging regions, please wait...')446 regions_munger = (_t1_regions_munger if dataset == 'T1' else447 partial(_cb1_regions_munger,448 clean_spurious_voxels=clean_spurious_voxels))449 regions = regions_munger()450 regions.to_hdf5(template_regions_h5, dataset_path='regions',451 compression='gzip', compression_opts=5)452 # Generate template nrrds453 if generate_nrdds:454 print('Generating region nrrds')455 _generate_regions_nrrds(dataset, regions)456 # Symlink template457 if not ignore_originals:458 print('\tSymlinking amira template')459 original = op.expanduser(originals.template)460 if not op.isfile(original):461 print('\tWARNING: Cannot find template %s' % original)462 original_dest = op.join(dataset_dir, 'originals', 'template.am')463 if relative_symlinks:464 original = op.relpath(original, op.dirname(original_dest))465 os.symlink(original, original_dest)466 # Read template and save to hdf5 (probably we could just skip this, for completeness)467 print('\tMunging template, please wait...')468 template, bounding_box = _read_template(op.expanduser(originals.template))469 with h5py.File(template_regions_h5, 'a') as h5:470 dset = h5.create_dataset('template', data=template,471 compression='gzip', compression_opts=5)472 dset.attrs['bounding_box'] = bounding_box473 dset.attrs['bounding_box_description'] = 'Bounding box is [xmin, xmax, ymin, ymax, zmin, zmax] micrometers'474 # Save the description of the hdf5 file475 template_regions_description = dedent("""476 This hdf5 file contains the registration template and,477 more importantly, its segmentation into regions/neuropils.478 The template is in dataset "template".479 The values are the intensity of the template image.480 It is a (xsize x ysize x zsize) byte array with attributes:481 - bounding_box: [xmin, xmax, ymin, ymax, zmin, zmax]482 - bounding_box_description: includes units (micrometers)483 The regions is in dataset "regions".484 The values are region id.485 It is a (xsize x ysize x zsize) byte array of "region ids" with attributes:486 - name: an array with the name of each region487 - id: an array with a numeric identifier of each region488 - description: an array with a longer description of each region489 - columns: ['name', 'id', 'description']490 - many 'rs=' attributes, that map region set name to region set labels491 If using python, this can be conveniently read using492 `braincode.revisions.hub.VoxelLabels.from_hdf5`.493 """).strip()494 with open(template_regions_h5 + '.description', 'w') as writer:495 writer.write(template_regions_description)496 print('\t%s done' % dataset)497# Uncomment this to keep developing the clean_spurious_voxels filter498# bootstrap_braincode_dir(generate_nrdds=True, clean_spurious_voxels=True, reset=True)499# exit(22)500if __name__ == '__main__':501 import argh...

Full Screen

Full Screen

manager.py

Source:manager.py Github

copy

Full Screen

1import errno2import json3import os4from galaxy import util5from galaxy.util.odict import odict6from galaxy.util.template import fill_template7from galaxy.tools.data import TabularToolDataTable8from tool_shed.util import common_util9import tool_shed.util.shed_util_common as suc10#set up logger11import logging12log = logging.getLogger( __name__ )13SUPPORTED_DATA_TABLE_TYPES = ( TabularToolDataTable )14VALUE_TRANSLATION_FUNCTIONS = dict( abspath=os.path.abspath )15DEFAULT_VALUE_TRANSLATION_TYPE = 'template'16class DataManagers( object ):17 def __init__( self, app, xml_filename=None ):18 self.app = app19 self.data_managers = odict()20 self.managed_data_tables = odict()21 self.tool_path = None22 self.filename = xml_filename or self.app.config.data_manager_config_file23 for filename in util.listify( self.filename ):24 if not filename:25 continue26 self.load_from_xml( filename )27 if self.app.config.shed_data_manager_config_file:28 self.load_from_xml( self.app.config.shed_data_manager_config_file, store_tool_path=False, replace_existing=True )29 def load_from_xml( self, xml_filename, store_tool_path=True, replace_existing=False ):30 try:31 tree = util.parse_xml( xml_filename )32 except Exception, e:33 log.error( 'There was an error parsing your Data Manager config file "%s": %s' % ( xml_filename, e ) )34 return #we are not able to load any data managers35 root = tree.getroot()36 if root.tag != 'data_managers':37 log.error( 'A data managers configuration must have a "data_managers" tag as the root. "%s" is present' % ( root.tag ) )38 return39 if store_tool_path:40 tool_path = root.get( 'tool_path', None )41 if tool_path is None:42 tool_path = self.app.config.tool_path43 if not tool_path:44 tool_path = '.'45 self.tool_path = tool_path46 for data_manager_elem in root.findall( 'data_manager' ):47 self.load_manager_from_elem( data_manager_elem, replace_existing=replace_existing )48 def load_manager_from_elem( self, data_manager_elem, tool_path=None, add_manager=True, replace_existing=False ):49 try:50 data_manager = DataManager( self, data_manager_elem, tool_path=tool_path )51 except Exception, e:52 log.error( "Error loading data_manager '%s':\n%s" % ( e, util.xml_to_string( data_manager_elem ) ) )53 return None54 if add_manager:55 self.add_manager( data_manager, replace_existing=replace_existing )56 log.debug( 'Loaded Data Manager: %s' % ( data_manager.id ) )57 return data_manager58 def add_manager( self, data_manager, replace_existing=False ):59 if not replace_existing:60 assert data_manager.id not in self.data_managers, "A data manager has been defined twice: %s" % ( data_manager.id )61 elif data_manager.id in self.data_managers:62 # Data Manager already exists, remove first one and replace with new one63 log.warning( "A data manager has been defined twice and will be replaced with the last loaded version: %s" % ( data_manager.id ) )64 self.remove_manager( data_manager.id )65 self.data_managers[ data_manager.id ] = data_manager66 for data_table_name in data_manager.data_tables.keys():67 if data_table_name not in self.managed_data_tables:68 self.managed_data_tables[ data_table_name ] = []69 self.managed_data_tables[ data_table_name ].append( data_manager )70 def get_manager( self, *args, **kwds ):71 return self.data_managers.get( *args, **kwds )72 def remove_manager( self, manager_ids ):73 if not isinstance( manager_ids, list ):74 manager_ids = [ manager_ids ]75 for manager_id in manager_ids:76 data_manager = self.get_manager( manager_id, None )77 if data_manager is not None:78 del self.data_managers[ manager_id ]79 #remove tool from toolbox80 if data_manager.tool:81 self.app.toolbox.remove_tool_by_id( data_manager.tool.id )82 #determine if any data_tables are no longer tracked83 for data_table_name in data_manager.data_tables.keys():84 remove_data_table_tracking = True85 for other_data_manager in self.data_managers.itervalues():86 if data_table_name in other_data_manager.data_tables:87 remove_data_table_tracking = False88 break89 if remove_data_table_tracking and data_table_name in self.managed_data_tables:90 del self.managed_data_tables[ data_table_name ]91class DataManager( object ):92 GUID_TYPE = 'data_manager'93 DEFAULT_VERSION = "0.0.1"94 def __init__( self, data_managers, elem=None, tool_path=None ):95 self.data_managers = data_managers96 self.declared_id = None97 self.name = None98 self.description = None99 self.version = self.DEFAULT_VERSION100 self.guid = None101 self.tool = None102 self.data_tables = odict()103 self.output_ref_by_data_table = {}104 self.move_by_data_table_column = {}105 self.value_translation_by_data_table_column = {}106 self.tool_shed_repository_info_dict = None107 self.undeclared_tables = False108 if elem is not None:109 self.load_from_element( elem, tool_path or self.data_managers.tool_path )110 def load_from_element( self, elem, tool_path ):111 assert elem.tag == 'data_manager', 'A data manager configuration must have a "data_manager" tag as the root. "%s" is present' % ( elem.tag )112 self.declared_id = elem.get( 'id', None )113 self.guid = elem.get( 'guid', None )114 path = elem.get( 'tool_file', None )115 self.version = elem.get( 'version', self.version )116 tool_shed_repository_id = None117 tool_guid = None118 if path is None:119 tool_elem = elem.find( 'tool' )120 assert tool_elem is not None, "Error loading tool for data manager. Make sure that a tool_file attribute or a tool tag set has been defined:\n%s" % ( util.xml_to_string( elem ) )121 path = tool_elem.get( "file", None )122 tool_guid = tool_elem.get( "guid", None )123 #need to determine repository info so that dependencies will work correctly124 tool_shed_url = tool_elem.find( 'tool_shed' ).text125 # Handle protocol changes.126 tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.data_managers.app, tool_shed_url )127 # The protocol is not stored in the database.128 tool_shed = common_util.remove_protocol_from_tool_shed_url( tool_shed_url )129 repository_name = tool_elem.find( 'repository_name' ).text130 repository_owner = tool_elem.find( 'repository_owner' ).text131 installed_changeset_revision = tool_elem.find( 'installed_changeset_revision' ).text132 self.tool_shed_repository_info_dict = dict( tool_shed=tool_shed,133 name=repository_name,134 owner=repository_owner,135 installed_changeset_revision=installed_changeset_revision )136 tool_shed_repository = \137 suc.get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( self.data_managers.app,138 tool_shed,139 repository_name,140 repository_owner,141 installed_changeset_revision )142 if tool_shed_repository is None:143 log.warning( 'Could not determine tool shed repository from database. This should only ever happen when running tests.' )144 #we'll set tool_path manually here from shed_conf_file145 tool_shed_repository_id = None146 try:147 tool_path = util.parse_xml( elem.get( 'shed_conf_file' ) ).getroot().get( 'tool_path', tool_path )148 except Exception, e:149 log.error( 'Error determining tool_path for Data Manager during testing: %s', e )150 else:151 tool_shed_repository_id = self.data_managers.app.security.encode_id( tool_shed_repository.id )152 #use shed_conf_file to determine tool_path153 shed_conf_file = elem.get( "shed_conf_file", None )154 if shed_conf_file:155 shed_conf = self.data_managers.app.toolbox.get_shed_config_dict_by_filename( shed_conf_file, None )156 if shed_conf:157 tool_path = shed_conf.get( "tool_path", tool_path )158 assert path is not None, "A tool file path could not be determined:\n%s" % ( util.xml_to_string( elem ) )159 self.load_tool( os.path.join( tool_path, path ),160 guid=tool_guid,161 data_manager_id=self.id,162 tool_shed_repository_id=tool_shed_repository_id )163 self.name = elem.get( 'name', self.tool.name )164 self.description = elem.get( 'description', self.tool.description )165 self.undeclared_tables = util.asbool( elem.get( 'undeclared_tables', self.undeclared_tables ) )166 for data_table_elem in elem.findall( 'data_table' ):167 data_table_name = data_table_elem.get( "name" )168 assert data_table_name is not None, "A name is required for a data table entry"169 if data_table_name not in self.data_tables:170 self.data_tables[ data_table_name ] = odict()#{}171 output_elem = data_table_elem.find( 'output' )172 if output_elem is not None:173 for column_elem in output_elem.findall( 'column' ):174 column_name = column_elem.get( 'name', None )175 assert column_name is not None, "Name is required for column entry"176 data_table_coumn_name = column_elem.get( 'data_table_name', column_name )177 self.data_tables[ data_table_name ][ data_table_coumn_name ] = column_name178 output_ref = column_elem.get( 'output_ref', None )179 if output_ref is not None:180 if data_table_name not in self.output_ref_by_data_table:181 self.output_ref_by_data_table[ data_table_name ] = {}182 self.output_ref_by_data_table[ data_table_name ][ data_table_coumn_name ] = output_ref183 value_translation_elems = column_elem.findall( 'value_translation' )184 if value_translation_elems is not None:185 for value_translation_elem in value_translation_elems:186 value_translation = value_translation_elem.text187 if value_translation is not None:188 value_translation_type = value_translation_elem.get( 'type', DEFAULT_VALUE_TRANSLATION_TYPE )189 if data_table_name not in self.value_translation_by_data_table_column:190 self.value_translation_by_data_table_column[ data_table_name ] = {}191 if data_table_coumn_name not in self.value_translation_by_data_table_column[ data_table_name ]:192 self.value_translation_by_data_table_column[ data_table_name ][ data_table_coumn_name ] = []193 if value_translation_type == 'function':194 if value_translation in VALUE_TRANSLATION_FUNCTIONS:195 value_translation = VALUE_TRANSLATION_FUNCTIONS[ value_translation ]196 else:197 raise ValueError( "Unsupported value translation function: '%s'" % ( value_translation ) )198 else:199 assert value_translation_type == DEFAULT_VALUE_TRANSLATION_TYPE, ValueError( "Unsupported value translation type: '%s'" % ( value_translation_type ) )200 self.value_translation_by_data_table_column[ data_table_name ][ data_table_coumn_name ].append( value_translation )201 for move_elem in column_elem.findall( 'move' ):202 move_type = move_elem.get( 'type', 'directory' )203 relativize_symlinks = move_elem.get( 'relativize_symlinks', False ) #TODO: should we instead always relativize links?204 source_elem = move_elem.find( 'source' )205 if source_elem is None:206 source_base = None207 source_value = ''208 else:209 source_base = source_elem.get( 'base', None )210 source_value = source_elem.text211 target_elem = move_elem.find( 'target' )212 if target_elem is None:213 target_base = None214 target_value = ''215 else:216 target_base = target_elem.get( 'base', None )217 target_value = target_elem.text218 if data_table_name not in self.move_by_data_table_column:219 self.move_by_data_table_column[ data_table_name ] = {}220 self.move_by_data_table_column[ data_table_name ][ data_table_coumn_name ] = \221 dict( type=move_type,222 source_base=source_base,223 source_value=source_value,224 target_base=target_base,225 target_value=target_value,226 relativize_symlinks=relativize_symlinks )227 @property228 def id( self ):229 return self.guid or self.declared_id #if we have a guid, we will use that as the data_manager id230 def load_tool( self, tool_filename, guid=None, data_manager_id=None, tool_shed_repository_id=None ):231 toolbox = self.data_managers.app.toolbox232 tool = toolbox.load_hidden_tool( tool_filename,233 guid=guid,234 data_manager_id=data_manager_id,235 repository_id=tool_shed_repository_id )236 self.data_managers.app.toolbox.data_manager_tools[ tool.id ] = tool237 self.tool = tool238 return tool239 def process_result( self, out_data ):240 data_manager_dicts = {}241 data_manager_dict = {}242 #TODO: fix this merging below243 for output_name, output_dataset in out_data.iteritems():244 try:245 output_dict = json.loads( open( output_dataset.file_name ).read() )246 except Exception, e:247 log.warning( 'Error reading DataManagerTool json for "%s": %s' % ( output_name, e ) )248 continue249 data_manager_dicts[ output_name ] = output_dict250 for key, value in output_dict.iteritems():251 if key not in data_manager_dict:252 data_manager_dict[ key ] = {}253 data_manager_dict[ key ].update( value )254 data_manager_dict.update( output_dict )255 data_tables_dict = data_manager_dict.get( 'data_tables', {} )256 for data_table_name, data_table_columns in self.data_tables.iteritems():257 data_table_values = data_tables_dict.pop( data_table_name, None )258 if not data_table_values:259 log.warning( 'No values for data table "%s" were returned by the data manager "%s".' % ( data_table_name, self.id ) )260 continue #next data table261 data_table = self.data_managers.app.tool_data_tables.get( data_table_name, None )262 if data_table is None:263 log.error( 'The data manager "%s" returned an unknown data table "%s" with new entries "%s". These entries will not be created. Please confirm that an entry for "%s" exists in your "%s" file.' % ( self.id, data_table_name, data_table_values, data_table_name, 'tool_data_table_conf.xml' ) )264 continue #next table name265 if not isinstance( data_table, SUPPORTED_DATA_TABLE_TYPES ):266 log.error( 'The data manager "%s" returned an unsupported data table "%s" with type "%s" with new entries "%s". These entries will not be created. Please confirm that the data table is of a supported type (%s).' % ( self.id, data_table_name, type( data_table ), data_table_values, SUPPORTED_DATA_TABLE_TYPES ) )267 continue #next table name268 output_ref_values = {}269 if data_table_name in self.output_ref_by_data_table:270 for data_table_column, output_ref in self.output_ref_by_data_table[ data_table_name ].iteritems():271 output_ref_dataset = out_data.get( output_ref, None )272 assert output_ref_dataset is not None, "Referenced output was not found."273 output_ref_values[ data_table_column ] = output_ref_dataset274 if not isinstance( data_table_values, list ):275 data_table_values = [ data_table_values ]276 for data_table_row in data_table_values:277 data_table_value = dict( **data_table_row ) #keep original values here278 for name, value in data_table_row.iteritems(): #FIXME: need to loop through here based upon order listed in data_manager config279 if name in output_ref_values:280 moved = self.process_move( data_table_name, name, output_ref_values[ name ].extra_files_path, **data_table_value )281 data_table_value[ name ] = self.process_value_translation( data_table_name, name, **data_table_value )282 data_table.add_entry( data_table_value, persist=True, entry_source=self )283 if self.undeclared_tables and data_tables_dict:284 # We handle the data move, by just moving all the data out of the extra files path285 # moving a directory and the target already exists, we move the contents instead286 log.debug( 'Attempting to add entries for undeclared tables: %s.', ', '.join( data_tables_dict.keys() ) )287 for ref_file in out_data.values():288 util.move_merge( ref_file.extra_files_path, self.data_managers.app.config.galaxy_data_manager_data_path )289 path_column_names = [ 'path' ]290 for data_table_name, data_table_values in data_tables_dict.iteritems():291 data_table = self.data_managers.app.tool_data_tables.get( data_table_name, None )292 if not isinstance( data_table_values, list ):293 data_table_values = [ data_table_values ]294 for data_table_row in data_table_values:295 data_table_value = dict( **data_table_row ) #keep original values here296 for name, value in data_table_row.iteritems():297 if name in path_column_names:298 data_table_value[ name ] = os.path.abspath( os.path.join( self.data_managers.app.config.galaxy_data_manager_data_path, value ) )299 data_table.add_entry( data_table_value, persist=True, entry_source=self )300 else:301 for data_table_name, data_table_values in data_tables_dict.iteritems():302 # tool returned extra data table entries, but data table was not declared in data manager303 # do not add these values, but do provide messages304 log.warning( 'The data manager "%s" returned an undeclared data table "%s" with new entries "%s". These entries will not be created. Please confirm that an entry for "%s" exists in your "%s" file.' % ( self.id, data_table_name, data_table_values, data_table_name, self.data_managers.filename ) )305 def process_move( self, data_table_name, column_name, source_base_path, relative_symlinks=False, **kwd ):306 if data_table_name in self.move_by_data_table_column and column_name in self.move_by_data_table_column[ data_table_name ]:307 move_dict = self.move_by_data_table_column[ data_table_name ][ column_name ]308 source = move_dict[ 'source_base' ]309 if source is None:310 source = source_base_path311 else:312 source = fill_template( source, GALAXY_DATA_MANAGER_DATA_PATH=self.data_managers.app.config.galaxy_data_manager_data_path, **kwd )313 if move_dict[ 'source_value' ]:314 source = os.path.join( source, fill_template( move_dict[ 'source_value' ], GALAXY_DATA_MANAGER_DATA_PATH=self.data_managers.app.config.galaxy_data_manager_data_path, **kwd ) )315 target = move_dict[ 'target_base' ]316 if target is None:317 target = self.data_managers.app.config.galaxy_data_manager_data_path318 else:319 target = fill_template( target, GALAXY_DATA_MANAGER_DATA_PATH=self.data_managers.app.config.galaxy_data_manager_data_path, **kwd )320 if move_dict[ 'target_value' ]:321 target = os.path.join( target, fill_template( move_dict[ 'target_value' ], GALAXY_DATA_MANAGER_DATA_PATH=self.data_managers.app.config.galaxy_data_manager_data_path, **kwd ) )322 if move_dict[ 'type' ] == 'file':323 dirs, filename = os.path.split( target )324 try:325 os.makedirs( dirs )326 except OSError, e:327 if e.errno != errno.EEXIST:328 raise e329 #moving a directory and the target already exists, we move the contents instead330 util.move_merge( source, target )331 if move_dict.get( 'relativize_symlinks', False ):332 util.relativize_symlinks( target )333 return True334 return False335 def process_value_translation( self, data_table_name, column_name, **kwd ):336 value = kwd.get( column_name )337 if data_table_name in self.value_translation_by_data_table_column and column_name in self.value_translation_by_data_table_column[ data_table_name ]:338 for value_translation in self.value_translation_by_data_table_column[ data_table_name ][ column_name ]:339 if isinstance( value_translation, basestring ):340 value = fill_template( value_translation, GALAXY_DATA_MANAGER_DATA_PATH=self.data_managers.app.config.galaxy_data_manager_data_path, **kwd )341 else:342 value = value_translation( value )343 return value344 def get_tool_shed_repository_info_dict( self ):...

Full Screen

Full Screen

drbdlinks

Source:drbdlinks Github

copy

Full Screen

1#!/usr/bin/env python2#3# Manage a set of links into a DRBD shared directory4#5# Written by: Sean Reifschneider <jafo@tummy.com>6# Copyright (c) 2004-2013, tummy.com, ltd. All Rights Reserved7# drbdlinks is under the following license: GPLv28import os9import sys10import stat11import syslog12import shutil13import glob14import subprocess15configFile = '/etc/drbdlinks.conf'16configDir = '/etc/drbdlinks.d/*.conf'17cleanConfigsDirectory = '/var/lib/drbdlinks/configs-to-clean'18syslog.openlog('drbdlinks', syslog.LOG_PID)19try:20 import optparse21except ImportError:22 import optik23 optparse = optik24try:25 from subprocess import DEVNULL26except ImportError:27 DEVNULL = open(os.devnull, 'wb')28try:29 execfile30except NameError:31 def execfile(filepath, globals=None, locals=None):32 with open(filepath, 'rb') as file:33 exec(compile(file.read(), filepath, 'exec'), globals, locals)34class lsb:35 class statusRC:36 OK = 037 VAR_PID = 138 VAR_LOCK = 239 STOPPED = 340 UNKNOWN = 441 LSBRESERVED = 542 DISTRESERVED = 10043 APPRESERVED = 15044 RESERVED = 20045 class exitRC:46 OK = 047 GENERIC = 148 EINVAL = 249 ENOTSUPPORTED = 350 EPERM = 451 NOTINSTALLED = 552 NOTCONFIGED = 653 NOTRUNNING = 754 LSBRESERVED = 855 DISTRESERVED = 10056 APPRESERVED = 15057 RESERVED = 20058###########59def log(s):60 sys.stderr.write(s)61 syslog.syslog(s)62##############################################63def multiInitRestart(flavor, initscript_list):64 for initscript in initscript_list:65 if os.path.exists(initscript):66 if initscript.endswith(".service"):67 retcode = os.system(68 'systemctl restart %s' % initscript.rsplit('/', 1)[1])69 else:70 retcode = os.system('%s restart' % initscript)71 if retcode != 0:72 log('%s restart returned %d, expected 0' % (flavor, retcode))73 return(retcode != 0)74 syslog.syslog(75 'Unable to locate %s init script, not restarting.' % flavor)76 return(0)77##########################78def restartSyslog(config):79 if not config.restartSyslog:80 return(0)81 return multiInitRestart(82 'syslog', ['/etc/init.d/syslog', '/etc/init.d/rsyslog',83 '/usr/lib/systemd/system/rsyslog.service'])84########################85def restartCron(config):86 if not config.restartCron:87 return(0)88 return multiInitRestart(89 'cron', ['/etc/init.d/crond', '/etc/init.d/cron',90 '/usr/lib/systemd/system/crond.service'])91#######################92def testConfig(config):93 allUp = True94 for linkLocal, linkDest, useBindLink in config.linkList:95 suffixName = linkLocal + options.suffix96 # check to see if the link is in place97 if not os.path.exists(suffixName):98 allUp = False99 if options.verbose >= 1:100 print(101 'testConfig: Original file not present: "%s"' % suffixName)102 continue103 if options.verbose >= 1:104 print('testConfig: Returning %s' % allUp)105 return(allUp)106###############################107def loadConfigFile(configFile):108 class configClass:109 def __init__(self):110 self.mountpoint = None111 self.cleanthisconfig = 0112 self.linkList = []113 self.useSELinux = 0114 self.selinuxenabledPath = None115 self.useBindMount = 0116 self.debug = 0117 self.restartSyslog = 0118 self.restartCron = 0119 self.makeMountpointShared = 0120 # Locate where the selinuxenabled binary is121 for path in (122 '/usr/sbin/selinuxenabled',123 '/sbin/selinuxenabled', ):124 if os.path.exists(path):125 self.selinuxenabledPath = path126 break127 # auto-detect if SELinux is on128 if self.selinuxenabledPath:129 ret = os.system(self.selinuxenabledPath)130 if ret == 0:131 self.useSELinux = 1132 # detect what ls(1) supports to show SELinux context133 try:134 subprocess.check_call(135 ['ls', '--scontext', __file__],136 stdout=DEVNULL, stderr=DEVNULL)137 self.showContextCommand = 'ls -d --scontext "%s"'138 except subprocess.CalledProcessError:139 self.showContextCommand = 'ls -d -Z -1 "%s"'140 def cmd_cleanthisconfig(self, enabled=1):141 self.cleanthisconfig = enabled142 def cmd_mountpoint(self, arg, shared=0):143 self.mountpoint = arg144 if shared:145 self.makeMountpointShared = 1146 def cmd_link(self, src, dest=None):147 self.linkList.append((src, dest, self.useBindMount))148 def cmd_selinux(self, enabled=1):149 self.useSELinux = enabled150 def cmd_usebindmount(self, enabled=1):151 self.useBindMount = enabled152 def cmd_debug(self, level=1):153 self.debug = level154 def cmd_restartSyslog(self, enabled=1):155 self.restartSyslog = enabled156 def cmd_restartCron(self, enabled=1):157 self.restartCron = enabled158 # set up config environment159 config = configClass()160 namespace = {161 'mountpoint': config.cmd_mountpoint,162 'link': config.cmd_link,163 'selinux': config.cmd_selinux,164 'debug': config.cmd_debug,165 'usebindmount': config.cmd_usebindmount,166 'restartSyslog': config.cmd_restartSyslog,167 'restartsyslog': config.cmd_restartSyslog,168 'restartCron': config.cmd_restartCron,169 'restartcron': config.cmd_restartCron,170 'cleanthisconfig': config.cmd_cleanthisconfig,171 }172 # load the files173 for filename in [configFile] + sorted(glob.iglob(configDir)):174 try:175 execfile(filename, {}, namespace)176 except Exception:177 print(178 'ERROR: Loading configuration file "%s" failed. '179 'See below for details:' % filename)180 print('Environment: %s' % repr(181 ['%s=%s' % i for i in os.environ.items()182 if i[0].startswith('OCF_')]))183 raise184 # process the data we got185 if config.mountpoint:186 config.mountpoint = config.mountpoint.rstrip('/')187 for i in range(len(config.linkList)):188 oldList = config.linkList[i]189 if oldList[1]:190 arg2 = oldList[1].rstrip('/')191 else:192 if not config.mountpoint:193 log(194 'ERROR: Used link() when no mountpoint() was set '195 'in the config file.\n')196 sys.exit(3)197 arg2 = oldList[0].lstrip('/')198 arg2 = os.path.join(config.mountpoint, arg2).rstrip('/')199 config.linkList[i] = (200 [oldList[0].rstrip('/'), arg2] + list(oldList[2:]))201 # return the data202 return(config)203def print_metadata():204 print('''205<?xml version="1.0"?>206<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">207<!-- Root element: give the name of the Resource agent -->208<resource-agent name="drbdlinks" version="@@@VERSION@@@">209<!-- Version number of the standard this complies with -->210<version>1.0</version>211<!-- List all the instance parameters the RA supports or requires. -->212<parameters>213<!-- Note that parameters flagged with 'unique' must be unique; ie no214 other resource instance of this resource type may have the same set215 of unique parameters.216-->217<parameter name="configfile" unique="1">218<longdesc lang="en">219The full path of the configuration file on disc. The default is220/etc/drbdlinks.conf, but you may wish to store this on the shared221storage, or have different config files if you have multiple222resource groups.223</longdesc>224<shortdesc lang="en">Configuration Filename</shortdesc>225<content type="string" default="/etc/drbdlinks.conf" />226</parameter>227<parameter name="suffix">228<longdesc lang="en">229The suffix of the files/directories that are moved out of the way on the host230file-system to make room for the symlink. By default this is ".drbdlinks".231</longdesc>232<shortdesc lang="en">Host Filename Suffix</shortdesc>233<content type="string" default=".drbdlinks" />234</parameter>235</parameters>236<!-- List the actions supported by the RA -->237<actions>238<action name="start" timeout="1m" />239<action name="stop" timeout="1m" />240<action name="monitor" depth="0" timeout="20" interval="10" />241<action name="meta-data" timeout="5" />242</actions>243</resource-agent>244'''.strip())245 sys.exit(lsb.exitRC.OK)246# meta-data may happen when configuration is not present.247if 'meta-data' in sys.argv:248 print_metadata()249# parse arguments250parser = optparse.OptionParser()251parser.add_option(252 '-c', '--config-file', dest='configFile', type='string',253 default=configFile,254 help='Location of the configuration file.')255parser.add_option(256 '-s', '--suffix', dest='suffix', type='string',257 default='.drbdlinks',258 help='Name to append to the local file-system name when the link '259 'is in place.')260parser.add_option(261 '-v', '--verbose', default=0,262 dest='verbose', action='count',263 help='Increase verbosity level by 1 for every "-v".')264parser.set_usage(265 '%prog (start|stop|auto|status|monitor|list|checklinks|\n'266 ' initialize_shared_storage)')267options, args = parser.parse_args()268origConfigFile = configFile269# if called from OCF, parse the environment270OCFenabled = os.environ.get('OCF_RA_VERSION_MAJOR') is not None271if OCFenabled:272 try:273 options.configFile = os.environ['OCF_RESKEY_configfile']274 except KeyError:275 pass276 try:277 options.suffix = os.environ['OCF_RESKEY_suffix']278 except KeyError:279 pass280configFile = options.configFile281# figure out what the mode to run in282if len(args) == 1 or (OCFenabled and len(args) > 0):283 # NOTE: OCF specifies that additional arguments beyond the first should284 # be ignored.285 if args[0] not in (286 'start', 'stop', 'auto', 'monitor', 'status', 'list',287 'checklinks', 'initialize_shared_storage'):288 parser.error(289 'ERROR: Unknown mode "%s", expecting one of '290 '(start|stop|auto|\n status|monitor|list|checklinks|'291 'initialize_shared_storage)' % args[0])292 sys.exit(lsb.exitRC.ENOTSUPPORTED)293 mode = args[0]294else:295 parser.error('Expected exactly one argument to specify the mode.')296 sys.exit(lsb.exitRC.EINVAL)297if options.verbose >= 2:298 print('Initial mode: "%s"' % mode)299# load config file300try:301 config = loadConfigFile(configFile)302except IOError as e:303 if e.errno == 2:304 if mode == 'monitor' or mode == 'status':305 print(306 'WARNING: Config file "%s" not found, assuming drbdlinks '307 'is stopped' % configFile)308 if mode == 'status':309 sys.exit(lsb.statusRC.STOPPED)310 else:311 sys.exit(lsb.exitRC.NOTRUNNING)312 print('ERROR: Unable to open config file "%s":' % configFile)313 print(' %s' % e)314 syslog.syslog('Invalid config file "%s"' % configFile)315 sys.exit(lsb.statusRC.UNKNOWN)316 raise317if not config.mountpoint:318 log('No mountpoint found in config file. Aborting.\n')319 if mode == 'monitor':320 if config.debug:321 syslog.syslog('Monitor called without mount point')322 sys.exit(lsb.exitRC.EINVAL)323 if config.debug:324 syslog.syslog('No mount point')325 sys.exit(lsb.statusRC.UNKNOWN)326if not os.path.exists(config.mountpoint):327 log('Mountpoint "%s" does not exist. Aborting.\n' % config.mountpoint)328 if mode == 'monitor':329 if config.debug:330 syslog.syslog('Mount point does not exist, monitor mode')331 sys.exit(lsb.exitRC.EINVAL)332 if config.debug:333 syslog.syslog('Mount point does not exist')334 sys.exit(lsb.statusRC.UNKNOWN)335# startup log message336if config.debug:337 syslog.syslog(338 'drbdlinks starting: args: "%s", configfile: "%s"'339 % (repr(sys.argv), configFile))340# if mode is auto, figure out what mode to use341if mode == 'auto':342 if (os.stat(config.mountpoint).st_dev !=343 os.stat(os.path.join(config.mountpoint, '..')).st_dev):344 if options.verbose >= 1:345 print('Detected mounted file-system on "%s"' % config.mountpoint)346 mode = 'start'347 else:348 mode = 'stop'349if options.verbose >= 1:350 print('Mode: "%s"' % mode)351# just display the list of links352if mode == 'list':353 for linkLocal, linkDest, useBindMount in config.linkList:354 print('%s %s %s' % (linkLocal, linkDest, useBindMount))355 sys.exit(0)356# set up links357anyLinksChanged = 0358if mode == 'start':359 errorCount = 0360 # set up shared mountpoint361 if config.makeMountpointShared:362 os.system('mount --make-shared "%s"' % config.mountpoint)363 # loop over links364 for linkLocal, linkDest, useBindMount in config.linkList:365 suffixName = linkLocal + options.suffix366 # check to see if the link is in place367 if os.path.exists(suffixName):368 if options.verbose >= 1:369 print(370 'Skipping, appears to already be linked: "%s"' % linkLocal)371 continue372 # make the link373 try:374 if options.verbose >= 2:375 print('Renaming "%s" to "%s"' % (linkLocal, suffixName))376 os.rename(linkLocal, suffixName)377 anyLinksChanged = 1378 except (OSError, IOError) as e:379 log(380 'Error renaming "%s" to "%s": %s\n'381 % (suffixName, linkLocal, str(e)))382 errorCount = errorCount + 1383 if options.verbose >= 2:384 print('Linking "%s" to "%s"' % (linkDest, linkLocal))385 anyLinksChanged = 1386 if useBindMount:387 st = os.stat(linkDest)388 if stat.S_ISREG(st.st_mode):389 open(linkLocal, 'w').close()390 else:391 os.mkdir(linkLocal)392 os.system('mount -o bind "%s" "%s"' % (linkDest, linkLocal))393 else:394 try:395 os.symlink(linkDest, linkLocal)396 except (OSError, IOError) as e:397 log(398 'Error linking "%s" to "%s": %s'399 % (linkDest, linkLocal, str(e)))400 errorCount = errorCount + 1401 # set up in SELinux402 if config.useSELinux:403 fp = os.popen(config.showContextCommand % suffixName, 'r')404 line = fp.readline()405 fp.close()406 if line:407 line = line.split(' ')[0]408 seInfo = line.split(':')409 seUser, seRole, seType = seInfo[:3]410 if len(seInfo) >= 4:411 seRange = seInfo[3]412 os.system(413 'chcon -h -u "%s" -r "%s" -t "%s" -l "%s" "%s"'414 % (seUser, seRole, seType, seRange, linkLocal))415 else:416 os.system(417 'chcon -h -u "%s" -r "%s" -t "%s" "%s"'418 % (seUser, seRole, seType, linkLocal))419 if anyLinksChanged:420 if restartSyslog(config):421 errorCount = errorCount + 1422 if restartCron(config):423 errorCount = errorCount + 1424 if config.cleanthisconfig and origConfigFile != configFile:425 if not os.path.exists(cleanConfigsDirectory):426 if config.debug:427 syslog.syslog(428 'Config copy directory "%s" does not exist.'429 % cleanConfigsDirectory)430 else:431 if config.debug:432 syslog.syslog('Preserving a copy of the config file.')433 shutil.copy(configFile, cleanConfigsDirectory)434 if errorCount:435 if config.debug:436 syslog.syslog('Exiting due to %d errors' % errorCount)437 sys.exit(lsb.exitRC.GENERIC)438 if config.debug:439 syslog.syslog('Exiting with no errors')440 sys.exit(lsb.exitRC.OK)441# remove links442elif mode == 'stop':443 errorCount = 0444 for linkLocal, linkDest, useBindMount in config.linkList:445 suffixName = linkLocal + options.suffix446 # check to see if the link is in place447 if not os.path.exists(suffixName):448 if options.verbose >= 1:449 print(450 'Skipping, appears to already be shut down: "%s"'451 % linkLocal)452 continue453 # break the link454 try:455 if options.verbose >= 2:456 print('Removing "%s"' % (linkLocal,))457 anyLinksChanged = 1458 if useBindMount:459 os.system('umount "%s"' % linkLocal)460 try:461 os.remove(linkLocal)462 except Exception:463 pass464 if os.path.exists(linkLocal):465 os.rmdir(linkLocal)466 else:467 os.remove(linkLocal)468 except (OSError, IOError) as e:469 log('Error removing "%s": %s\n' % (linkLocal, str(e)))470 errorCount = errorCount + 1471 try:472 if options.verbose >= 2:473 print('Renaming "%s" to "%s"' % (suffixName, linkLocal))474 os.rename(suffixName, linkLocal)475 anyLinksChanged = 1476 except (OSError, IOError) as e:477 log(478 'Error renaming "%s" to "%s": %s\n'479 % (suffixName, linkLocal, str(e)))480 errorCount = errorCount + 1481 if anyLinksChanged:482 restartSyslog(config)483 restartCron(config)484 if errorCount:485 if config.debug:486 syslog.syslog('Exiting due to %d errors' % errorCount)487 sys.exit(lsb.exitRC.GENERIC)488 if config.debug:489 syslog.syslog('Exiting with no errors')490 sys.exit(lsb.exitRC.OK)491# monitor mode492elif mode == 'monitor':493 if testConfig(config):494 if config.debug:495 syslog.syslog('Monitor mode returning ok')496 sys.exit(lsb.exitRC.OK)497 if config.debug:498 syslog.syslog('Monitor mode returning notrunning')499 sys.exit(lsb.exitRC.NOTRUNNING)500# status mode501elif mode == 'status':502 if testConfig(config):503 print("info: DRBD Links OK (present)")504 if config.debug:505 syslog.syslog('Status mode returning ok')506 sys.exit(lsb.statusRC.OK)507 print("info: DRBD Links stopped (not set up)")508 if config.debug:509 syslog.syslog('Status mode returning stopped')510 sys.exit(lsb.statusRC.STOPPED)511# check mode512elif mode == 'checklinks':513 for linkLocal, linkDest, useBindMount in config.linkList:514 if not os.path.exists(linkDest):515 print('Does not exist: %s' % linkDest)516 sys.exit(lsb.exitRC.OK)517# initialize_shared_storage mode518elif mode == 'initialize_shared_storage':519 def dirs_to_make(src, dest):520 '''Return a list of paths, from top to bottom, that need to be created521 in the destination. The return value is a list of tuples of522 (src, dest) where the `src` is the corresponding source directory to523 the `dest`.524 '''525 retval = []526 destdirname = os.path.dirname(dest)527 srcdirname = os.path.dirname(src)528 while srcdirname and srcdirname != '/':529 if os.path.exists(destdirname):530 break531 if os.path.basename(destdirname) != os.path.basename(srcdirname):532 break533 retval.append((srcdirname, destdirname))534 srcdirname = os.path.dirname(srcdirname)535 destdirname = os.path.dirname(destdirname)536 return retval[::-1]537 relative_symlinks = []538 for linkLocal, linkDest, useBindMount in config.linkList:539 if os.path.exists(linkDest):540 continue541 for src, dest in dirs_to_make(linkLocal, linkDest):542 print('Making directory "%s"' % dest)543 os.mkdir(dest)544 srcstat = os.stat(src)545 os.chmod(dest, srcstat.st_mode)546 os.chown(dest, srcstat.st_uid, srcstat.st_gid)547 print('Copying "%s" to "%s"' % (linkLocal, linkDest))548 os.system('cp -ar "%s" "%s"' % (linkLocal, linkDest))549 fp = os.popen('find "%s" -type l -lname "[^/].*" -print0' % linkLocal)550 relative_symlinks += [l for l in fp.read().split('\0') if l]551 fp.close()552 if relative_symlinks:553 print(554 '\nWARNING: The following copied files contain relative '555 'symlinks:\n')556 for symlink in relative_symlinks:557 print(' %s -> %s' % (symlink, os.readlink(symlink)))...

Full Screen

Full Screen

build_tools_cli.py

Source:build_tools_cli.py Github

copy

Full Screen

...246 args.relative_sh_script,247 artifact_dir,248 filename))249 if args.relative_symlinks:250 handlers.append(lambda filename: build_tools.postprocess_relative_symlinks(ctx.logger, artifact_dir,251 filename))252 if args.remove_pkgconfig:253 handlers.append(lambda filename: build_tools.postprocess_remove_pkgconfig(254 ctx.logger, filename))255 if args.relative_pkgconfig:256 handlers.append(lambda filename: build_tools.postprocess_relative_pkgconfig(257 ctx.logger, artifact_dir, filename))258 if args.check_relocatable:259 handlers.append(lambda filename: build_tools.check_relocatable(ctx.logger, args.check_ignore,260 artifact_dir, filename))261 if args.write_protect:262 handlers.append(build_tools.write_protect)263 if args.path is None:264 try:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

NotHelpful