How to use the sample_report method in Lemoncheesecake

Best Python code snippets using lemoncheesecake
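Before the longer examples, here is a minimal sketch of the pattern this page covers: a sample_report pytest fixture that builds a bare lemoncheesecake Report, and a test that round-trips it through the JSON reporting backend. It mirrors the test_report_backend.py snippet further down the page; the import path for JsonBackend and load_report_from_file is an assumption, since that snippet elides its import lines.

import time
import pytest
from lemoncheesecake.reporting.report import Report
# Assumed import path; the test_report_backend.py snippet below truncates this line.
from lemoncheesecake.reporting.backends.json_ import JsonBackend, load_report_from_file

@pytest.fixture()
def sample_report():
    # A bare Report whose start/end/saving timestamps are all "now"
    report = Report()
    ts = time.time()
    report.start_time = ts
    report.end_time = ts
    report.saving_time = ts
    return report

def test_json_roundtrip(tmpdir, sample_report):
    # Save the report with the JSON backend, load it back, and spot-check a field
    filename = tmpdir.join("report").strpath
    JsonBackend().save_report(filename, sample_report)
    loaded = load_report_from_file(filename)
    assert loaded.start_time == sample_report.start_time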

run_tool.py

Source: run_tool.py (GitHub)



1""" Run the pipeline and extract data.2Input is a json prepared by the staging tool.3"""4from importlib_resources import files5from pythologist_schemas import get_validator6from pythologist_reader.formats.inform import read_standard_format_sample_to_project7from pythologist import CellDataFrame, SubsetLogic as SL, PercentageLogic as PL8import logging, argparse, json, uuid9from collections import OrderedDict10import pandas as pd11import numpy as np12from datetime import datetime13import gzip, os14from tempfile import NamedTemporaryFile15def cli():16 args = do_inputs()17 main(args)18def main(args):19 "We need to take the platform and return an appropriate input template"20 if args.verbose:21 logging.basicConfig(level=logging.DEBUG)22 else:23 logging.basicConfig(level=logging.WARNING)24 if args.cache_directory:25 if not os.path.exists(args.cache_directory):26 os.makedirs(args.cache_directory)27 if not os.path.isdir(args.cache_directory):28 raise ValueError("cache directory not a directory")29 logger = logging.getLogger("start run")30 run_id = str(uuid.uuid4())31 logger.info("run_id "+run_id)32 inputs = json.loads(open(args.input_json,'rt').read())33 # Lets start by checking our inputs34 logger.info("check project json format")35 get_validator(files('schema_data.inputs.platforms.InForm').joinpath('project.json')).\36 validate(inputs['project'])37 logger.info("check analysis json format")38 get_validator(files('schema_data.inputs.platforms.InForm').joinpath('analysis.json')).\39 validate(inputs['analysis'])40 logger.info("check report json format")41 get_validator(files('schema_data.inputs').joinpath('report.json')).\42 validate(inputs['report'])43 logger.info("check panel json format")44 get_validator(files('schema_data.inputs').joinpath('panel.json')).\45 validate(inputs['panel'])46 _validator = get_validator(files('schema_data.inputs.platforms.InForm').joinpath('files.json'))47 for sample_input_json in inputs['sample_files']:48 logger.info("check sample files json format "+str(sample_input_json['sample_name']))49 _validator.validate(sample_input_json)50 # Now lets step through sample-by-sample executing the pipeline51 output = {52 'run_id':run_id,53 'time':str(datetime.now()),54 'project_name':inputs['project']['parameters']['project_name'],55 'report_name':inputs['report']['parameters']['report_name'],56 'report_version':inputs['report']['parameters']['report_version'],57 'analysis_name':inputs['analysis']['parameters']['analysis_name'],58 'analysis_version':inputs['analysis']['parameters']['analysis_version'],59 'panel_name':inputs['panel']['parameters']['panel_name'],60 'panel_version':inputs['panel']['parameters']['panel_version'],61 'sample_outputs':[execute_sample(x,inputs,run_id,verbose=args.verbose,cache_directory=args.cache_directory) for x in inputs['sample_files']]62 }63 logger.info("Finished reading creating output. 
Validate output format.")64 _validator = get_validator(files('schema_data').joinpath('report_output.json'))65 _validator.validate(output)66 logger.info("Validated output schema against schema")67 #logger.info(str(output['sample_outputs']))68 if args.output_json:69 with open(args.output_json,'wt') as of:70 of.write(json.dumps(output,allow_nan=False))71 return 72def execute_sample(files_json,inputs,run_id,verbose=False,cache_directory=None):73 primary_export = [x['export_name'] for x in inputs['analysis']['inform_exports'] if x['primary_phenotyping']][0]74 mutually_exclusive_phenotypes = [x['phenotype_name'] for x in inputs['analysis']['mutually_exclusive_phenotypes'] if x['export_name']==primary_export]75 logger = logging.getLogger(str(files_json['sample_name']))76 logger.info("staging channel abbreviations")77 channel_abbreviations = dict([(x['full_name'],x['marker_name']) for x in inputs['panel']['markers']])78 logger.info("reading exports to temporary h5")79 exports = read_standard_format_sample_to_project(files_json['sample_directory'],80 inputs['analysis']['parameters']['region_annotation_strategy'],81 channel_abbreviations = channel_abbreviations,82 sample = files_json['sample_name'],83 project_name = inputs['project']['parameters']['project_name'],84 custom_mask_name = inputs['analysis']['parameters']['region_annotation_custom_label'],85 other_mask_name = inputs['analysis']['parameters']['unannotated_region_label'],86 microns_per_pixel = inputs['project']['parameters']['microns_per_pixel'],87 line_pixel_steps = int(round(float(inputs['analysis']['parameters']['expanded_margin_width_um'] / \88 inputs['project']['parameters']['microns_per_pixel'])-float(inputs['analysis']['parameters']['draw_margin_width']))),89 verbose = False90 )91 logger.info("getting the primary export")92 primary_export_name = [x['export_name'] for x in inputs['analysis']['inform_exports'] if x['primary_phenotyping']]93 if len(primary_export_name) != 1: raise ValueError("didnt find the 1 single expected primary phenotyping in analysis")94 primary_export_name = primary_export_name[0]95 cpi = None96 cdfs = {}97 for export_name in exports:98 logger.info("extract CellDataFrame from h5 objects "+str(export_name))99 if export_name == primary_export_name:100 cpi = exports[export_name]101 cdfs[export_name] = exports[export_name].cdf.zero_fill_missing_phenotypes()102 cdfs[export_name]['project_id'] = run_id # force them to have the same project_id103 cdfs[export_name]['project_name'] = inputs['project']['parameters']['project_name']104 meps = [x['phenotype_name'] for x in inputs['analysis']['mutually_exclusive_phenotypes'] if x['export_name']==export_name and \105 x['convert_to_binary']]106 if len(meps) > 0:107 logger.info("converting mutually exclusive phenotype to binary phenotype for "+str(meps))108 cdfs[export_name] = cdfs[export_name].phenotypes_to_scored(phenotypes=meps,overwrite=False)109 110 cdf = cdfs[primary_export_name]111 for export_name in [x for x in cdfs if x!=primary_export_name]:112 logger.info("merging in "+str(export_name))113 _cdf = cdfs[export_name]114 _cdf['project_id'] = run_id115 cdf,f = cdf.merge_scores(_cdf,on=['project_name','sample_name','frame_name','x','y','cell_index'])116 if f.shape[0] > 0:117 raise ValueError("segmentation mismatch error "+str(f.shape[0]))118 logger.info("merging completed")119 # Now cdf contains a CellDataFrame sutiable for data extraction120 # One last check for logic of this extraction. 
Make sure we have all the expected phenotypes being staged in the CellDataFrame121 _missing = set(mutually_exclusive_phenotypes) - set(cdf.phenotypes)122 for phenotype_name in _missing:123 logger.warning("adding in a zeroed mutually exclusive phenotype "+str(phenotype_name)+" for this run.")124 cdf = cdf.add_zeroed_phenotype(phenotype_name)125 _unknown = set(cdf.phenotypes)- set(mutually_exclusive_phenotypes)126 if len(_unknown) > 0: raise ValueError("phenotypes we should not be seeing are present. "+str(_unknown))127 # For density measurements build our population definitions128 density_populations = []129 for population in inputs['report']['population_densities']:130 _pop = SL(phenotypes = population['mutually_exclusive_phenotypes'], 131 scored_calls = dict([(x['target_name'],x['filter_direction']) for x in population['binary_phenotypes']]),132 label = population['population_name']133 )134 density_populations.append(_pop)135 percentage_populations = []136 for population in inputs['report']['population_percentages']:137 _numerator = SL(phenotypes = population['numerator_mutually_exclusive_phenotypes'], 138 scored_calls = dict([(x['target_name'],x['filter_direction']) for x in population['numerator_binary_phenotypes']])139 )140 _denominator = SL(phenotypes = population['denominator_mutually_exclusive_phenotypes'], 141 scored_calls = dict([(x['target_name'],x['filter_direction']) for x in population['denominator_binary_phenotypes']])142 )143 _pop = PL(numerator = _numerator,144 denominator = _denominator,145 label = population['population_name']146 )147 percentage_populations.append(_pop)148 # Now calculate outputs for each region we are working with149 fcnts = []150 scnts = []151 fpcnts = []152 spcnts = []153 for report_region_row in inputs['report']['region_selection']:154 # Iterate over each region combining them to the name specified155 report_region_name = report_region_row['report_region_name']156 regions_to_combine = report_region_row['regions_to_combine']157 logger.info("extracting data for region '"+str(report_region_name)+"' which is made up of "+str(regions_to_combine))158 _cdf = cdf.combine_regions(regions_to_combine,report_region_name)159 # Fetch counts based on qc constraints160 _cnts = _cdf.counts(minimum_region_size_pixels=inputs['report']['parameters']['minimum_density_region_size_pixels'],161 minimum_denominator_count=inputs['report']['parameters']['minimum_denominator_count'])162 logger.info("frame-level densities")163 _fcnts = _cnts.frame_counts(subsets=density_populations)164 _fcnts = _fcnts.loc[_fcnts['region_label']==report_region_name,:]165 fcnts.append(_fcnts)166 logger.info("sample-level densities")167 _scnts = _cnts.sample_counts(subsets=density_populations)168 _scnts = _scnts.loc[_scnts['region_label']==report_region_name,:]169 scnts.append(_scnts)170 logger.info("frame-level percentages")171 _fpcnts = _cnts.frame_percentages(percentage_logic_list=percentage_populations)172 _fpcnts = _fpcnts.loc[_fpcnts['region_label']==report_region_name,:]173 fpcnts.append(_fpcnts)174 logger.info("sample-level percentages")175 _spcnts = _cnts.sample_percentages(percentage_logic_list=percentage_populations)176 _spcnts = _spcnts.loc[_spcnts['region_label']==report_region_name,:]177 spcnts.append(_spcnts)178 fcnts = pd.concat(fcnts).reset_index(drop=True)179 scnts = pd.concat(scnts).reset_index(drop=True)180 fpcnts = pd.concat(fpcnts).reset_index(drop=True)181 spcnts = pd.concat(spcnts).reset_index(drop=True)182 #prepare an output json 183 output = {184 
"sample_name":files_json['sample_name'],185 "sample_reports":{186 'sample_cumulative_count_densities':[],187 'sample_aggregate_count_densities':[],188 'sample_cumulative_count_percentages':[],189 'sample_aggregate_count_percentages':[]190 },191 "images":[]192 }193 # Now fill in the data194 for image_name in [x['image_name'] for x in files_json['exports'][0]['images']]:195 pmap_cnames,pmap_rows,frame_shape, region_sizes = _get_image_info(image_name,files_json['sample_name'],cdf)196 output['images'].append({197 'image_name':image_name,198 'image_size_pixels':frame_shape,199 "microns_per_pixel":inputs['project']['parameters']['microns_per_pixel'],200 'image_reports':{201 'image_count_densities':_organize_frame_count_densities(fcnts.loc[fcnts['frame_name']==image_name],inputs['report']['parameters']['minimum_density_region_size_pixels']),202 'image_count_percentages':_organize_frame_percentages(fpcnts.loc[fpcnts['frame_name']==image_name],inputs['report']['parameters']['minimum_denominator_count'])203 },204 'phenotype_map':{205 'column_names':pmap_cnames,206 'rows':pmap_rows,207 'mutually_exclusive_phenotypes':mutually_exclusive_phenotypes208 },209 'region_sizes':region_sizes210 })211 # Do sample level densities212 output['sample_reports']['sample_cumulative_count_densities'] = \213 _organize_sample_cumulative_count_densities(scnts,inputs['report']['parameters']['minimum_density_region_size_pixels'])214 output['sample_reports']['sample_aggregate_count_densities'] = \215 _organize_sample_aggregate_count_densities(scnts,inputs['report']['parameters']['minimum_density_region_size_pixels'])216 # Now do percentages217 output['sample_reports']['sample_cumulative_count_percentages'] = \218 _organize_sample_cumulative_percentages(spcnts,inputs['report']['parameters']['minimum_density_region_size_pixels'])219 output['sample_reports']['sample_aggregate_count_percentages'] = \220 _organize_sample_aggregate_percentages(spcnts,inputs['report']['parameters']['minimum_density_region_size_pixels'])221 output['intermediate_files'] = {}222 output['intermediate_files']['project_h5'] = None223 output['intermediate_files']['celldataframe_h5'] = None224 if cache_directory:225 ntf1 = NamedTemporaryFile(dir=cache_directory,delete=False,prefix='PROJ-',suffix='.h5')226 logger.info("saving project to "+str(ntf1.name))227 cpi.to_hdf(ntf1.name,overwrite=True)228 output['intermediate_files']['project_h5'] = ntf1.name229 ntf2 = NamedTemporaryFile(dir=cache_directory,delete=False,prefix='CDF-',suffix='.h5')230 logger.info("saving celldataframe to "+str(ntf2.name))231 cdf.to_hdf(ntf2.name,'data')232 output['intermediate_files']['celldataframe_h5'] = ntf2.name233 return output234def _get_image_info(image_name,sample_name,cdf):235 subset = cdf.loc[(cdf['sample_name']==sample_name)&(cdf['frame_name']==image_name)].copy().dropna(subset=['phenotype_label'])236 rows = []237 for k,v in subset.loc[:,['cell_index','x','y','region_label','phenotype_label','scored_calls']].\238 set_index(['cell_index','x','y','region_label','phenotype_label'])['scored_calls'].to_dict().items():239 rows.append(list(k)+[[(k0,v0) for k0,v0 in v.items()]])240 return ['cell_index','x','y','region_name','mutually_exclusive_phenotype','binary_phenotypes'], \241 rows, \242 dict(zip(('y','x'),subset.iloc[0]['frame_shape'])), \243 [x for x in subset.get_measured_regions()[['region_label','region_area_pixels']].\244 rename(columns={'region_label':'region_name'}).T.to_dict().values()]245def _organize_frame_percentages(frame_percentages,min_denominator_count):246 # 
Make the list of sample count density features in dictionary format247 # make an object to convert pythologist internal count reports to the expected column names248 conv = OrderedDict({249 'region_label':'region_name',250 'phenotype_label':'population_name',251 'numerator':'numerator_count',252 'denominator':'denominator_count',253 'fraction':'fraction',254 'percent':'percent'255 })256 keeper_columns = list(conv.values())257 frame_report = frame_percentages.rename(columns=conv).loc[:,keeper_columns]258 frame_report['measure_qc_pass'] = True259 frame_report.loc[frame_report['denominator_count'] < min_denominator_count,'measure_qc_pass'] = False260 frame_report = frame_report.replace([np.inf,-np.inf],np.nan)261 frame_report = frame_report.where(pd.notnull(frame_report), None)262 return [row.to_dict() for index,row in frame_report.iterrows()]263def _organize_frame_count_densities(frame_count_densities,min_pixel_count):264 # Make the list of sample count density features in dictionary format265 # make an object to convert pythologist internal count reports to the expected column names266 conv = OrderedDict({267 'region_label':'region_name',268 'phenotype_label':'population_name',269 'region_area_pixels':'region_area_pixels',270 'region_area_mm2':'region_area_mm2',271 'count':'count',272 'density_mm2':'density_mm2'273 })274 keeper_columns = list(conv.values())275 #print(keeper_columns)276 #print(frame_count_densities.rename(columns=conv).columns)277 frame_report = frame_count_densities.rename(columns=conv).loc[:,keeper_columns]278 frame_report['measure_qc_pass'] = True279 frame_report.loc[frame_report['region_area_pixels'] < min_pixel_count,'measure_qc_pass'] = False280 frame_report = frame_report.where(pd.notnull(frame_report), None)281 return [row.to_dict() for index,row in frame_report.iterrows()]282def _organize_sample_cumulative_count_densities(sample_count_densities,min_pixel_count):283 # Make the list of sample count density features in dictionary format284 # make an object to convert pythologist internal count reports to the expected column names285 conv = OrderedDict({286 'region_label':'region_name',287 'phenotype_label':'population_name',288 'frame_count':'image_count',289 'cumulative_region_area_pixels':'cumulative_region_area_pixels',290 'cumulative_region_area_mm2':'cumulative_region_area_mm2',291 'cumulative_count':'cumulative_count',292 'cumulative_density_mm2':'cumulative_density_mm2'293 })294 keeper_columns = list(conv.values())295 sample_report = sample_count_densities.rename(columns=conv).loc[:,keeper_columns]296 sample_report['measure_qc_pass'] = True297 sample_report.loc[sample_report['cumulative_region_area_pixels'] < min_pixel_count,'measure_qc_pass'] = False298 299 sample_report = sample_report.where(pd.notnull(sample_report), None)300 return [row.to_dict() for index,row in sample_report.iterrows()]301def _organize_sample_aggregate_count_densities(sample_count_densities,min_pixel_count):302 # Make the list of sample count density features in dictionary format303 # make an object to convert pythologist internal count reports to the expected column names304 conv = OrderedDict({305 'region_label':'region_name',306 'phenotype_label':'population_name',307 'frame_count':'image_count',308 'measured_frame_count':'aggregate_measured_image_count',309 'mean_density_mm2':'aggregate_mean_density_mm2',310 'stddev_density_mm2':'aggregate_stddev_density_mm2',311 'stderr_density_mm2':'aggregate_stderr_density_mm2'312 })313 keeper_columns = list(conv.values())314 sample_report = 
sample_count_densities.rename(columns=conv).loc[:,keeper_columns]315 sample_report['measure_qc_pass'] = True316 sample_report.loc[sample_report['aggregate_measured_image_count'] < 1,'measure_qc_pass'] = False317 318 sample_report = sample_report.where(pd.notnull(sample_report), None)319 return [row.to_dict() for index,row in sample_report.iterrows()]320def _organize_sample_cumulative_percentages(sample_count_densities,min_denominator_count):321 # Make the list of sample count density features in dictionary format322 # make an object to convert pythologist internal count reports to the expected column names323 conv = OrderedDict({324 'region_label':'region_name',325 'phenotype_label':'population_name',326 'frame_count':'image_count',327 'cumulative_numerator':'cumulative_numerator_count',328 'cumulative_denominator':'cumulative_denominator_count',329 'cumulative_fraction':'cumulative_fraction',330 'cumulative_percent':'cumulative_percent'331 })332 keeper_columns = list(conv.values())333 sample_report = sample_count_densities.rename(columns=conv).loc[:,keeper_columns]334 sample_report['measure_qc_pass'] = True335 sample_report.loc[sample_report['cumulative_denominator_count'] < min_denominator_count,'measure_qc_pass'] = False336 337 sample_report = sample_report.replace([np.inf,-np.inf],np.nan)338 sample_report = sample_report.where(pd.notnull(sample_report), None)339 return [row.to_dict() for index,row in sample_report.iterrows()]340def _organize_sample_aggregate_percentages(sample_count_densities,min_denominator_count):341 # Make the list of sample count density features in dictionary format342 # make an object to convert pythologist internal count reports to the expected column names343 conv = OrderedDict({344 'region_label':'region_name',345 'phenotype_label':'population_name',346 'frame_count':'image_count',347 'measured_frame_count':'aggregate_measured_image_count',348 'mean_fraction':'aggregate_mean_fraction',349 'stdev_fraction':'aggregate_stddev_fraction',350 'stderr_fraction':'aggregate_stderr_fraction',351 'mean_percent':'aggregate_mean_percent',352 'stdev_percent':'aggregate_stddev_percent',353 'stderr_percent':'aggregate_stderr_percent'354 })355 keeper_columns = list(conv.values())356 sample_report = sample_count_densities.rename(columns=conv).loc[:,keeper_columns]357 sample_report['measure_qc_pass'] = True358 sample_report.loc[sample_report['aggregate_measured_image_count'] < 1,'measure_qc_pass'] = False359 360 sample_report = sample_report.replace([np.inf,-np.inf],np.nan)361 sample_report = sample_report.where(pd.notnull(sample_report), None)362 return [row.to_dict() for index,row in sample_report.iterrows()]363def do_inputs():364 parser = argparse.ArgumentParser(365 description = "Run the pipeline",366 formatter_class=argparse.ArgumentDefaultsHelpFormatter)367 parser.add_argument('--input_json',required=True,help="The json file defining the run")368 parser.add_argument('--output_json',help="The output of the pipeline")369 parser.add_argument('--verbose',action='store_true',help="Show more about the run")370 parser.add_argument('--cache_directory',help="If set intermediate files will be stored in a directory")371 args = parser.parse_args()372 return args373def external_cmd(cmd):374 """function for calling program by command through a function"""375 cache_argv = sys.argv376 sys.argv = cmd377 args = do_inputs()378 main(args)379 sys.argv = cache_argv380if __name__ == "__main__":...
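Since run_tool.py exposes the pipeline through argparse, it can be driven from the shell or programmatically via external_cmd. A usage sketch with hypothetical file names (run.json, out.json, and cache/ are placeholders, not files shipped with the project):

from run_tool import external_cmd

external_cmd([
    'run_tool.py',                 # argv[0]; argparse only reads the flags after it
    '--input_json', 'run.json',    # json prepared by the staging tool (placeholder name)
    '--output_json', 'out.json',   # where the validated report json is written
    '--cache_directory', 'cache',  # optional: keep intermediate project/CDF h5 files
    '--verbose',
])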


test_report_aeroo.py

Source: test_report_aeroo.py (GitHub)



# -*- coding: utf-8 -*-
# © 2016 Savoir-faire Linux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import os
import stat
from odoo.exceptions import ValidationError
from odoo.modules import module
from odoo.tests import common

class TestAerooReport(common.SavepointCase):

    @classmethod
    def setUpClass(cls):
        super(TestAerooReport, cls).setUpClass()
        image_path = (
            module.get_module_path('report_aeroo') + '/static/img/logo.png')
        cls.company = cls.env['res.company'].create({
            'name': 'My Company',
        })
        cls.company_2 = cls.env['res.company'].create({
            'name': 'My Company 2',
        })
        cls.partner = cls.env['res.partner'].create({
            'name': 'My Partner',
            'lang': 'en_US',
            'company_id': cls.company.id,
            'image': open(image_path, 'rb').read().encode('base64')
        })
        cls.lang_en = cls.env.ref('base.lang_en').id
        cls.lang_fr = cls.env.ref('base.lang_fr').id
        cls.partner_2 = cls.env['res.partner'].create({
            'name': 'My Partner 2',
            'lang': 'en_US',
        })
        cls.report = cls.env.ref('report_aeroo.aeroo_sample_report_id')
        cls.report.write({
            'attachment': None,
            'attachment_use': False,
        })
        cls.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_location', 'libreoffice')
        cls.env['ir.config_parameter'].set_param(
            'report_aeroo.pdftk_location', 'pdftk')
        cls.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '60')

    def test_01_sample_report_doc(self):
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_doc_odt')
        self.partner.print_report('sample_report', {})

    def _create_report_line(self, lang, company=None):
        self.report.write({
            'tml_source': 'lang',
            'lang_eval': 'o.lang',
            'out_format': self.env.ref(
                'report_aeroo.report_mimetypes_pdf_odt').id,
        })
        self.report.report_line_ids = [(0, 0, {
            'lang_id': lang,
            'company_id': company,
            'template_source': 'file',
            'template_location': 'report_aeroo/demo/template.odt',
        })]

    def test_02_sample_report_pdf_by_lang(self):
        self._create_report_line(self.lang_en)
        self.partner.print_report('sample_report', {})

    def test_03_sample_report_pdf_with_attachment(self):
        self.report.write({
            'attachment_use': True,
            'attachment': "object.name",
        })
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        self.partner.print_report('sample_report', {})
        attachment = self.env['ir.attachment'].search([
            ('res_id', '=', self.partner.id),
            ('res_model', '=', 'res.partner'),
            ('datas_fname', '=', 'My Partner.pdf'),
        ])
        self.assertEqual(len(attachment), 1)
        self.partner.print_report('sample_report', {})

    def test_04_libreoffice_low_timeout(self):
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '0.01')
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def _set_libreoffice_location(self, filename):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        file_location = 'sh ' + dir_path + '/' + filename
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_location', file_location)

    def test_05_fail_after_10ms(self):
        self._set_libreoffice_location('./sleep_10ms.sh')
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def test_06_libreoffice_finish_after_100s(self):
        self._set_libreoffice_location('./libreoffice_100s.sh')
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '5')
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def test_07_libreoffice_fail(self):
        self._set_libreoffice_location('./libreoffice_fail.sh')
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '5')
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def test_08_multicompany_context(self):
        self._create_report_line(self.lang_en, self.company.id)
        self.partner.print_report('sample_report', {})

    def test_09_multicompany_context(self):
        self._create_report_line(self.lang_en, self.company.id)
        self.partner.write({'company_id': self.company_2.id})
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def test_10_multicompany_context(self):
        self._create_report_line(self.lang_en)
        self.partner.print_report('sample_report', {})

    def test_11_multicompany_context(self):
        self._create_report_line(self.lang_fr)
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})

    def test_12_sample_report_pdf_with_multiple_export(self):
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        partners = self.partner | self.partner_2
        partners.print_report('sample_report', {})

    def test_13_pdf_low_timeout(self):
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '0.01')
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        partners = self.partner | self.partner_2
        with self.assertRaises(ValidationError):
            ...
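The recurring pattern in these tests: select an output mimetype by XML id, then render the aeroo sample report for a record with print_report; when LibreOffice cannot finish within report_aeroo.libreoffice_timeout, the module raises ValidationError. A condensed, hypothetical test in the same style (it assumes it runs inside TestAerooReport, so self.report and self.partner exist from setUpClass):

    def test_sample_report_pdf_times_out(self):
        # Hypothetical test distilled from test_04/test_13 above.
        self.report.out_format = self.env.ref(
            'report_aeroo.report_mimetypes_pdf_odt')
        # 10 ms is far too short for LibreOffice to convert the template,
        # so the render is expected to fail with ValidationError.
        self.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '0.01')
        with self.assertRaises(ValidationError):
            self.partner.print_report('sample_report', {})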


test_report_backend.py

Source: test_report_backend.py (GitHub)



...
    load_report_from_file as load_json
from lemoncheesecake.reporting.backend import get_reporting_backend_names, parse_reporting_backend_names_expression
from helpers.report import assert_report

@pytest.fixture()
def sample_report():
    report = Report()
    ts = time.time()
    report.start_time = ts
    report.end_time = ts
    report.saving_time = ts
    return report

def _test_save_report(tmpdir, sample_report, backend, load_func):
    filename = tmpdir.join("report").strpath
    backend.save_report(filename, sample_report)
    report = load_func(filename)
    assert_report(report, sample_report)

def test_save_report_json(tmpdir, sample_report):
    _test_save_report(tmpdir, sample_report, JsonBackend(), load_json)

def _test_load_report(tmpdir, sample_report, save_func):
    ...


get_fastqfiles.py

Source: get_fastqfiles.py (GitHub)



import pandas as pd
import os
from optimalcodon.limsdatacopy import get_fastqs_from_lims

path_to_fastqs = "/n/analysis/Bazzini/arb/MOLNG-2541/HGYJ3BGX9/" # TODO: change this to unix
destination_dir = "rawfastqfiles"
sample_report = os.path.join(path_to_fastqs, "Sample_Report.csv")
sample_report = pd.read_csv(sample_report)
# subset the data for the given experiment
MOLNG_2541 = sample_report[(sample_report.Reference == "danRer10") & (sample_report.Order == "MOLNG-2541")]
MOLNG_2541 = MOLNG_2541.drop_duplicates(['IndexSequence1', 'SampleName'])
for _, row in MOLNG_2541.iterrows():
    destinationname = os.path.join(destination_dir, 'zfishRibo0' + row.SampleName + ".fastq.gz")
    get_fastqs_from_lims(path_to_fastqs, row.IndexSequence1, destinationname)

### MOLNG-2540
path_to_fastqs = "/n/analysis/Bazzini/arb/MOLNG-2540/HFVYFBGX9/" # TODO: change this to unix
destination_dir = "rawfastqfiles"
sample_report = os.path.join(path_to_fastqs, "Sample_Report.csv")
sample_report = pd.read_csv(sample_report)
# subset the data for the given experiment
MOLNG_2540 = sample_report[(sample_report.Reference == "danRer10") & (sample_report.Order == "MOLNG-2540")]
MOLNG_2540 = MOLNG_2540.drop_duplicates(['IndexSequence1', 'SampleName'])
for _, row in MOLNG_2540.iterrows():
    destinationname = os.path.join(destination_dir, 'zfishPolyA' + row.SampleName + ".fastq.gz")
    get_fastqs_from_lims(path_to_fastqs, row.IndexSequence1, destinationname)

# MOLNG-2539: Zebrafish_alpha_timecourse
path_to_fastqs = "/n/analysis/Bazzini/arb/MOLNG-2539/HFWJMBGX9/" # TODO: change this to unix
destination_dir = "rawfastqfiles"
sample_report = os.path.join(path_to_fastqs, "Sample_Report.csv")
sample_report = pd.read_csv(sample_report)
# subset the data for the given experiment
MOLNG_2539 = sample_report[(sample_report.Reference == "danRer10") & (sample_report.Order == "MOLNG-2539")]
MOLNG_2539 = MOLNG_2539.drop_duplicates(['IndexSequence1', 'SampleName'])
for _, row in MOLNG_2539.iterrows():
    destinationname = os.path.join(destination_dir, 'TreatedAamanitin_zfishPolyA' + row.SampleName + ".fastq.gz")
    ...
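The three blocks above differ only in order id, flowcell directory, and output prefix, so the script could be collapsed into a single parameterized loop. A sketch under that assumption, reusing the imports and destination_dir defined above (the order/flowcell/prefix triples come straight from the script):

# Sketch: the same logic as above, parameterized over the three orders.
runs = [
    ("MOLNG-2541", "HGYJ3BGX9", "zfishRibo0"),
    ("MOLNG-2540", "HFVYFBGX9", "zfishPolyA"),
    ("MOLNG-2539", "HFWJMBGX9", "TreatedAamanitin_zfishPolyA"),
]
for order, flowcell, prefix in runs:
    path_to_fastqs = "/n/analysis/Bazzini/arb/{}/{}/".format(order, flowcell)
    sample_report = pd.read_csv(os.path.join(path_to_fastqs, "Sample_Report.csv"))
    # subset the sample report to the reference genome and order of interest
    subset = sample_report[(sample_report.Reference == "danRer10") & (sample_report.Order == order)]
    subset = subset.drop_duplicates(['IndexSequence1', 'SampleName'])
    for _, row in subset.iterrows():
        destinationname = os.path.join(destination_dir, prefix + row.SampleName + ".fastq.gz")
        get_fastqs_from_lims(path_to_fastqs, row.IndexSequence1, destinationname)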


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

