How to use the combine method in avocado

Best Python code snippets using avocado_python. The examples below are drawn from open-source projects on GitHub and show different ways a combine operation is used in test and data-processing code.

bidirectional_feature_pyramid_generators_tf2_test.py

Source: bidirectional_feature_pyramid_generators_tf2_test.py (GitHub)


# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bidirectional feature pyramid generators."""
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version


@parameterized.parameters({'bifpn_num_iterations': 2},
                          {'bifpn_num_iterations': 8})
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class BiFPNFeaturePyramidGeneratorTest(test_case.TestCase):

  def _build_conv_hyperparams(self):
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      force_use_bias: true
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def test_get_expected_feature_map_shapes(self, bifpn_num_iterations):
    with test_utils.GraphContextOrNone() as g:
      image_features = [
          ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
          ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
          ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
      ]
      bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
          bifpn_num_iterations=bifpn_num_iterations,
          bifpn_num_filters=128,
          fpn_min_level=3,
          fpn_max_level=7,
          input_max_level=5,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False)

    def graph_fn():
      feature_maps = bifpn_generator(image_features)
      return feature_maps

    expected_feature_map_shapes = {
        '{}_dn_lvl_3'.format(bifpn_num_iterations): (4, 16, 16, 128),
        '{}_up_lvl_4'.format(bifpn_num_iterations): (4, 8, 8, 128),
        '{}_up_lvl_5'.format(bifpn_num_iterations): (4, 4, 4, 128),
        '{}_up_lvl_6'.format(bifpn_num_iterations): (4, 2, 2, 128),
        '{}_up_lvl_7'.format(bifpn_num_iterations): (4, 1, 1, 128)}
    out_feature_maps = self.execute(graph_fn, [], g)
    out_feature_map_shapes = dict(
        (key, value.shape) for key, value in out_feature_maps.items())
    self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_variable_names(self, bifpn_num_iterations):
    with test_utils.GraphContextOrNone() as g:
      image_features = [
          ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
          ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
          ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
      ]
      bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
          bifpn_num_iterations=bifpn_num_iterations,
          bifpn_num_filters=128,
          fpn_min_level=3,
          fpn_max_level=7,
          input_max_level=5,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False,
          name='bifpn')

    def graph_fn():
      return bifpn_generator(image_features)

    self.execute(graph_fn, [], g)
    expected_variables = [
        'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
        'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
        'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
        'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
        'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
        'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
        'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/bias',
        'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/kernel',
        'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
        'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
        'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
        'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel']
    expected_node_variable_patterns = [
        ['bifpn/node_{:02}/{}_dn_lvl_6/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_dn_lvl_5/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_dn_lvl_4/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_dn_lvl_3/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_up_lvl_4/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_up_lvl_5/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_up_lvl_6/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/pointwise_kernel'],
        ['bifpn/node_{:02}/{}_up_lvl_7/combine/bifpn_combine_weights',
         'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/bias',
         'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/depthwise_kernel',
         'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/pointwise_kernel']]
    node_i = 2
    for iter_i in range(1, bifpn_num_iterations+1):
      for node_variable_patterns in expected_node_variable_patterns:
        for pattern in node_variable_patterns:
          expected_variables.append(pattern.format(node_i, iter_i))
        node_i += 1
    expected_variables = set(expected_variables)
    actual_variable_set = set(
        [var.name.split(':')[0] for var in bifpn_generator.variables])
    self.assertSetEqual(expected_variables, actual_variable_set)

# TODO(aom): Tests for create_bifpn_combine_op.
if __name__ == '__main__':
...
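For context, here is a minimal, runnable sketch of the class-level parameterization pattern the test above relies on: applied to a parameterized.TestCase subclass, the decorator re-runs every test method once per parameter dict, passing the entries as keyword arguments. The class and method names below are hypothetical stand-ins, not part of the original file.

import unittest
from absl.testing import parameterized

# Decorating the class fans every test method out over each parameter dict.
@parameterized.parameters({'bifpn_num_iterations': 2},
                          {'bifpn_num_iterations': 8})
class FanOutExample(parameterized.TestCase):

  def test_iterations_positive(self, bifpn_num_iterations):
    # Runs twice: once with bifpn_num_iterations=2, once with 8.
    self.assertGreater(bifpn_num_iterations, 0)

if __name__ == '__main__':
  unittest.main()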


AnalysisData.py

Source: AnalysisData.py (GitHub)


'''
Created on 2015/11/3
@author: FZY
'''
import pandas as pd
import numpy as np
from MissingValueByPrediction import TunningParamter, Ridge_param, Lasso_param, skl_lr_param
from MissingValueByPrediction import xgb_regression_param, xgb_tree_param, xgb_regression_param_by_tree
from sklearn.datasets import dump_svmlight_file

def getNullMessage(data, name):
    rows = data.shape[0]
    messages = ""
    message = ""
    rateMessages = ""
    for col in data.columns:
        rate = data[np.isnan(data[col])].shape[0]/float(rows)
        message = str(col) + ":" + str(rate)
        if rate > 0.1:
            rateMessages = rateMessages + ":" + message + "\n"
        messages = messages + message + '\n'
    f = open("../../data/analysis/%s.null.analysi.txt"%(name), "wb")
    f.write(messages)
    f.write("----------------------speical message--------------------\n")
    f.write(rateMessages)
    f.close()

#plot the picture of missing data
def plotDataScatter(data, features, name):
    for feature in features:
        data = data[~ np.isnan(data[feature])]
        p = data.plot(kind='scatter', x='Id', y=feature).get_figure()
        p.savefig("../../data/img/%s.%s.png"%(name, feature))

def dumpDatatoSVMFormat(data, X_feature, Y_feature, filename):
    dump_svmlight_file(data[X_feature], data[Y_feature], "../../data/analysis/svm/%s"%(filename))

#plot the picture of category data
if __name__ == '__main__':
    #train = pd.read_csv("../../data/train_clean_fill_F10.csv")
    #test = pd.read_csv("../../data/test_clean_fill_F10.csv")
    #analysis the data
    #get the columns for analysis
    #np.savetxt("../../data/analysis/train_columns.name.txt",train.columns, fmt="%s")
    #np.savetxt("../../data/analysis/test_columns.name.txt",test.columns,fmt="%s")

    #1.now we need to count the null
    #getNullMessage(train, "train")
    #getNullMessage(test, "test")

    #2.now we need to analysis the scatter of data
    #features = ['Feature_1','Feature_2','Feature_4','Feature_10','Feature_20']
    """
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)
    plotDataScatter(train, features, "train")
    """
    #plotDataScatter(train, features, "train")
    #plotDataScatter(test, features,"test")

    #3.the feature1 and feature10 feat20 is category feature
    #train[['Feature_1','Feature_10','Feature_20']].hist()
    #test[['Feature_1','Feature_10','Feature_20']].hist()

    #4.now we need to analysis
    #1.for the Feature_2 there are 22.8% have na value ,so we use regreesion to predict
    #the vlaue
    """
    train_use = train[~ np.isnan(train['Feature_2'])]
    test_use = test[~ np.isnan(test['Feature_2'])]
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)
    #drop the feature
    nullFeatures = ['Feature_1','Feature_4','Feature_10','Feature_20']
    features = list(set(features) -set(nullFeatures))
    #use Ridge to predict the bestValue
    #we need to combine the train and test validate our model
    pieces = [train_use[features],test_use[features]]
    data_combine = pd.concat(pieces)
    data_combine.to_csv("../../data/data_combine.csv")
    """
    #Feature_2 has 22.8% na values
    #data_combine = pd.read_csv("../../data/data_combine.csv")
    #TunningParamter(Ridge_param, data_combine , data_combine.columns,'Feature_2')
    #TunningParamter(Lasso_param, data_combine, data_combine.columns, 'Feature_2')
    #we use the regression to predict the model ,but the result do not have good result
    #I will add the reture value feature to regression

    """
    train_use = train[~ np.isnan(train['Feature_2'])]
    test_use = test[~ np.isnan(test['Feature_2'])]
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)

    # add reture value
    #drop the feature
    nullFeatures = ['Feature_1','Feature_4','Feature_10','Feature_20']
    features = list(set(features) -set(nullFeatures))
    features = features + ['Ret_MinusTwo','Ret_MinusOne']
    for i in range(2,121):
        feature= "Ret_" + str(i)
        features.append(feature)

    print features
    #use Ridge to predict the bestValue
    #we need to combine the train and test validate our model
    pieces = [train_use[features],test_use[features]]
    data_combine = pd.concat(pieces)
    data_combine.to_csv("../../data/data_combine_1.csv")

    data_combine = pd.read_csv("../../data/data_combine_1.csv")
    TunningParamter(Ridge_param, data_combine , data_combine.columns,'Feature_2','combine_1')
    TunningParamter(Lasso_param, data_combine, data_combine.columns, 'Feature_2','combine_1')
    """
    #now we need to deal with Feature_4
    """
    train_use = train[~ np.isnan(train['Feature_4'])]
    test_use = test[~ np.isnan(test['Feature_4'])]
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)

    nullFeatures = ['Feature_1','Feature_2','Feature_10','Feature_20']
    features = list(set(features) -set(nullFeatures))
    pieces = [train_use[features],test_use[features]]
    data_combine = pd.concat(pieces)
    data_combine.to_csv("../../data/data_combine_2.csv")
    """
    #data_combine = pd.read_csv("../../data/data_combine_2.csv")
    #dumpDatatoSVMFormat(data_combine, X_feature, Y_feature, 'combine_2')
    #TunningParamter(Ridge_param, data_combine , data_combine.columns,'Feature_4','combine_2')
    #TunningParamter(Lasso_param, data_combine, data_combine.columns, 'Feature_4','combine_2')

    #for the feature_20 ,50% na
    """
    train_use = train[~ np.isnan(train['Feature_1'])]
    test_use = test[~ np.isnan(test['Feature_1'])]
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)

    nullFeatures = ['Feature_10','Feature_2','Feature_20','Feature_4']
    features = list(set(features) -set(nullFeatures))
    features = features + ['Ret_MinusTwo','Ret_MinusOne']
    pieces = [train_use[features],test_use[features]]
    data_combine = pd.concat(pieces)
    data_combine.to_csv("../../data/data_combine_5.csv")

    data_combine = pd.read_csv("../../data/data_combine_5.csv")
    TunningParamter(skl_lr_param, data_combine , data_combine.columns,'Feature_1','combine_5')
    """
    """
    train_use = train[~ np.isnan(train['Feature_20'])]
    test_use = test[~ np.isnan(test['Feature_20'])]
    features = []
    for i in range(1,26):
        feature = "Feature_" + str(i)
        features.append(feature)
    nullFeatures = ['Feature_2','Feature_4','Feature_1']
    features = list(set(features) -set(nullFeatures))
    features = features + ['Ret_MinusTwo','Ret_MinusOne']
    pieces = [train_use[features],test_use[features]]
    data_combine = pd.concat(pieces)
    data_combine['Feature_20'] = data_combine['Feature_20'] - 2

    TunningParamter(xgb_tree_param, data_combine, data_combine.columns,'Feature_20','data_combine_20_feature')
    """
    ##now we need to get range of Ret_1 ~ Ret_180
    """
    train = pd.read_csv("../../data/train_weight.csv")
    features = []
    for i in range(2,181):
        feature = "Ret_"+str(i)
        features.append(feature)
    #plot scatter
    plotDataScatter(train,features,'train')
    """
    train = pd.read_csv("../../data/train_clean_fill_all_1.csv")
    getNullMessage(train, 'train_2')
...
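The "combine" step this script repeats is plain row-wise concatenation: drop the rows where the target feature is NaN in train and test, then stack what remains so an imputation model can be fit on every observed value. A minimal sketch of that pattern with toy data; the frames and column values here are invented for illustration.

import numpy as np
import pandas as pd

# Toy stand-ins for the train/test CSVs the script loads.
train = pd.DataFrame({'Feature_2': [1.0, np.nan, 3.0], 'Feature_3': [4.0, 5.0, 6.0]})
test = pd.DataFrame({'Feature_2': [np.nan, 8.0], 'Feature_3': [9.0, 10.0]})

# Keep only rows where the target feature is present...
train_use = train[~np.isnan(train['Feature_2'])]
test_use = test[~np.isnan(test['Feature_2'])]

# ...then stack the surviving train and test rows into one frame.
data_combine = pd.concat([train_use, test_use], ignore_index=True)
print(data_combine)  # three rows: two from train, one from test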


test_combinations_test.py

Source: test_combinations_test.py (GitHub)


...
from absl.testing import parameterized
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.eager import test


class TestingCombinationsTest(test.TestCase):

  def test_combine(self):
    self.assertEqual([{
        "a": 1,
        "b": 2
    }, {
        "a": 1,
        "b": 3
    }, {
        "a": 2,
        "b": 2
    }, {
        "a": 2,
        "b": 3
    }], combinations.combine(a=[1, 2], b=[2, 3]))

  def test_arguments_sorted(self):
    self.assertEqual([
        OrderedDict([("aa", 1), ("ab", 2)]),
        OrderedDict([("aa", 1), ("ab", 3)]),
        OrderedDict([("aa", 2), ("ab", 2)]),
        OrderedDict([("aa", 2), ("ab", 3)])
    ], combinations.combine(ab=[2, 3], aa=[1, 2]))

  def test_combine_single_parameter(self):
    self.assertEqual([{
        "a": 1,
        "b": 2
    }, {
        "a": 2,
        "b": 2
    }], combinations.combine(a=[1, 2], b=2))

  def test_add(self):
    self.assertEqual(
        [{
            "a": 1
        }, {
            "a": 2
        }, {
            "b": 2
        }, {
            "b": 3
        }],
        combinations.combine(a=[1, 2]) + combinations.combine(b=[2, 3]))

  def test_times(self):
    c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])
    c2 = combinations.combine(mode=["eager"], loss=["callable"])
    c3 = combinations.combine(distribution=["d1", "d2"])
    c4 = combinations.times(c3, c1 + c2)
    self.assertEqual([
        OrderedDict([("distribution", "d1"), ("loss", "callable"),
                     ("mode", "graph")]),
        OrderedDict([("distribution", "d1"), ("loss", "tensor"),
                     ("mode", "graph")]),
        OrderedDict([("distribution", "d1"), ("loss", "callable"),
                     ("mode", "eager")]),
        OrderedDict([("distribution", "d2"), ("loss", "callable"),
                     ("mode", "graph")]),
        OrderedDict([("distribution", "d2"), ("loss", "tensor"),
                     ("mode", "graph")]),
        OrderedDict([("distribution", "d2"), ("loss", "callable"),
                     ("mode", "eager")])
    ], c4)

  def test_times_variable_arguments(self):
    c1 = combinations.combine(mode=["graph", "eager"])
    c2 = combinations.combine(optimizer=["adam", "gd"])
    c3 = combinations.combine(distribution=["d1", "d2"])
    c4 = combinations.times(c3, c1, c2)
    self.assertEqual([
        OrderedDict([("distribution", "d1"), ("mode", "graph"),
                     ("optimizer", "adam")]),
        OrderedDict([("distribution", "d1"), ("mode", "graph"),
                     ("optimizer", "gd")]),
        OrderedDict([("distribution", "d1"), ("mode", "eager"),
                     ("optimizer", "adam")]),
        OrderedDict([("distribution", "d1"), ("mode", "eager"),
                     ("optimizer", "gd")]),
        OrderedDict([("distribution", "d2"), ("mode", "graph"),
                     ("optimizer", "adam")]),
        OrderedDict([("distribution", "d2"), ("mode", "graph"),
                     ("optimizer", "gd")]),
        OrderedDict([("distribution", "d2"), ("mode", "eager"),
                     ("optimizer", "adam")]),
        OrderedDict([("distribution", "d2"), ("mode", "eager"),
                     ("optimizer", "gd")])
    ], c4)
    self.assertEqual(
        combinations.combine(
            mode=["graph", "eager"],
            optimizer=["adam", "gd"],
            distribution=["d1", "d2"]), c4)

  def test_overlapping_keys(self):
    c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])
    c2 = combinations.combine(mode=["eager"], loss=["callable"])
    with self.assertRaisesRegexp(ValueError, ".*Keys.+overlap.+"):
      _ = combinations.times(c1, c2)


@combinations.generate(combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(parameterized.TestCase):

  def test_add_things(self, a, b, c):
    self.assertLessEqual(3, a + b + c)
    self.assertLessEqual(a + b + c, 5)

  def test_add_things_one_more(self, a, b, c):
    self.assertLessEqual(3, a + b + c)
    self.assertLessEqual(a + b + c, 5)

  def not_a_test(self, a=0, b=0, c=0):
    del a, b, c
    self.fail()

  def _test_but_private(self, a=0, b=0, c=0):
    del a, b, c
    self.fail()

  # Check that nothing funny happens to a non-callable that starts with "_test".
...
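As the assertions above show, combine() expands its keyword arguments into a cartesian product, yielding one dict per combination with keys sorted alphabetically and scalar values treated as single-element lists. The following is a plain-Python sketch of those semantics for reference, not the TensorFlow implementation itself.

import itertools
from collections import OrderedDict

def combine(**kwargs):
    # Sort keys so each result dict lists its arguments alphabetically.
    keys = sorted(kwargs)
    # Promote scalars to single-element lists, then take the cartesian product.
    values = [kwargs[k] if isinstance(kwargs[k], list) else [kwargs[k]] for k in keys]
    return [OrderedDict(zip(keys, combo)) for combo in itertools.product(*values)]

assert combine(a=[1, 2], b=2) == [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}]
assert len(combine(a=[1, 2], b=[2, 3])) == 4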


transcript_two_genome_MCscan.py

Source: transcript_two_genome_MCscan.py (GitHub)


#!/usr/bin/python3
import sys
import os
import mykit
import re
import subprocess

sysname1 = sys.argv[1]
sysname2 = sys.argv[2]
transcript_prefix = sys.argv[3]
outdir = sys.argv[4]
threads = sys.argv[5]

def genome_transcripts_MC(sysname1, sysname2):
    pwd = subprocess.getoutput('pwd')
    os.system("cat {sysname1}_{sysname2}.blast.result | awk '$3>98' | awk '$4>5000' > {sysname1}_{sysname2}.98_5000.blast.result".format(sysname1=sysname1, sysname2=sysname2))
    os.system("cat {sysname2}_{sysname1}.blast.result | awk '$3>98' | awk '$4>5000' > {sysname2}_{sysname1}.98_5000.blast.result".format(sysname1=sysname1, sysname2=sysname2))
    os.system('cp MCscanX_{sysname1}_{sysname2}.gff MCscanX_{sysname2}_{sysname1}.gff'.format(sysname1=sysname1, sysname2=sysname2))
    combine_list = [sysname1+'_'+sysname2, sysname2+'_'+sysname1]
    for combine in combine_list:
        if mykit.extfile(combine):
            os.system('mkdir '+combine)
            os.system('mkdir '+combine+'/query')
            os.system('mkdir '+combine+'/db')
        with open('{combine}.98_5000.blast.result'.format(combine=combine)) as f:
            blastmsg_list = f.readlines()
        query_chromosome_list = []
        query_msg_dict = {}
        db_chromosome_list = []
        db_msg_dict = {}
        for blastmsg in blastmsg_list:
            query_chr = blastmsg.split('|')[1].split(':')[0]
            db_chr = blastmsg.split('|')[2].split(':')[0]
            if query_chr not in query_chromosome_list:
                query_chromosome_list.append(query_chr)
                query_msg_dict[query_chr] = []
            if db_chr not in db_chromosome_list:
                db_chromosome_list.append(db_chr)
                db_msg_dict[db_chr] = []
            query_msg_dict[query_chr].append(blastmsg)
            db_msg_dict[db_chr].append(blastmsg)
        for query_chr in query_msg_dict:
            output = open('{combine}/query/{query_chr}.blast'.format(combine=combine, query_chr=query_chr), 'w')
            circos_ctl_list = []
            for msg in query_msg_dict[query_chr]:
                output.write(msg)
            output.close()
            os.system('cp MCscanX_{sysname1}_{sysname2}.gff {combine}/query/{query_chr}.gff'.format(sysname1=sysname1, sysname2=sysname2, combine=combine, query_chr=query_chr))
            os.system('MCScanX {combine}/query/{query_chr} &> /dev/null'.format(combine=combine, query_chr=query_chr))
            for msg in open('{combine}/query/{query_chr}.collinearity'.format(combine=combine, query_chr=query_chr)):
                if msg.startswith('#'):
                    continue
                chr1 = msg.split('|')[1].split(':')[0]
                chr2 = msg.split('|')[2].split(':')[0]
                if chr1 not in circos_ctl_list:
                    circos_ctl_list.append(chr1)
                if chr2 not in circos_ctl_list:
                    circos_ctl_list.append(chr2)
            with open('{combine}/query/{query_chr}.dot.ctl'.format(combine=combine, query_chr=query_chr), 'w') as f:
                f.write('2000\n')
                writes = ''
                for nchr in circos_ctl_list:
                    writes += nchr+','
                writes = re.sub(',$', '', writes)
                f.write(writes)
            os.chdir('/home/a2431559261/soft/MCScanX/downstream_analyses')
            os.system('java circle_plotter -g {pwd}/MCscanX_{combine}.gff -s {pwd}/{combine}/query/{query_chr}.collinearity -c {pwd}/{combine}/query/{query_chr}.dot.ctl -o {pwd}/{combine}/query/{query_chr}.dot.PNG &> /dev/null'.format(pwd=pwd, combine=combine, query_chr=query_chr))
            os.chdir(pwd)
        for db_chr in db_msg_dict:
            output = open('{combine}/db/{db_chr}.blast'.format(combine=combine, db_chr=db_chr), 'w')
            circos_ctl_list = []
            for msg in db_msg_dict[db_chr]:
                output.write(msg)
            output.close()
            os.system('cp MCscanX_{sysname1}_{sysname2}.gff {combine}/db/{db_chr}.gff'.format(sysname1=sysname1, sysname2=sysname2, db_chr=db_chr, combine=combine))
            os.system('MCScanX {combine}/db/{db_chr} &> /dev/null'.format(combine=combine, db_chr=db_chr))
            for msg in open('{combine}/db/{db_chr}.collinearity'.format(combine=combine, db_chr=db_chr)):
                if msg.startswith('#'):
                    continue
                chr1 = msg.split('|')[1].split(':')[0]
                chr2 = msg.split('|')[2].split(':')[0]
                if chr1 not in circos_ctl_list:
                    circos_ctl_list.append(chr1)
                if chr2 not in circos_ctl_list:
                    circos_ctl_list.append(chr2)
            with open('{combine}/db/{db_chr}.dot.ctl'.format(combine=combine, db_chr=db_chr), 'w') as f:
                f.write('2000\n')
                writes = ''
                for nchr in circos_ctl_list:
                    writes += nchr+','
                writes = re.sub(',$', '', writes)
                f.write(writes)
            os.chdir('/home/a2431559261/OLDISK/soft/MCScanX/downstream_analyses')
            os.system('java circle_plotter -g {pwd}/MCscanX_{combine}.gff -s {pwd}/{combine}/db/{db_chr}.collinearity -c {pwd}/{combine}/db/{db_chr}.dot.ctl -o {pwd}/{combine}/db/{db_chr}.dot.PNG &> /dev/null'.format(pwd=pwd, combine=combine, db_chr=db_chr))
            os.chdir(pwd)

mykit.genome.genome_transcript_circos(sysname1, sysname2, transcript_prefix, outdir, threads)
genome_transcripts_MC(sysname1, sysname2)
...
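Here "combine" simply names each ordering of the genome pair (query_db and db_query); for each ordering the script filters the pairwise BLAST table down to strong hits (identity > 98, alignment length > 5000) before running MCScanX. Below is a sketch of that filtering step done in pandas instead of the cat | awk shell pipeline, assuming tab-separated BLAST outfmt 6 input; the genome labels are hypothetical.

import pandas as pd

sysname1, sysname2 = 'genomeA', 'genomeB'  # hypothetical labels
combine_list = [f'{sysname1}_{sysname2}', f'{sysname2}_{sysname1}']

for combine in combine_list:
    # In outfmt 6, 0-based columns 2 and 3 are percent identity and
    # alignment length -- the fields awk addresses as $3 and $4.
    blast = pd.read_csv(f'{combine}.blast.result', sep='\t', header=None)
    strong_hits = blast[(blast[2] > 98) & (blast[3] > 5000)]
    strong_hits.to_csv(f'{combine}.98_5000.blast.result',
                       sep='\t', header=False, index=False)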


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

