How to use create_activation method in localstack

Best Python code snippet using localstack_python

layers_factory.py

Source:layers_factory.py Github

copy

Full Screen

# [Excerpt truncated above: the tail of a preceding create_*_layer factory
#  (keyword args input_dim ... featureless=False, num_supports=1) is cut off
#  in this excerpt and is omitted here.]


def create_GraphConvolutionAttention_layer(layer_info, model, layer_id):
    """Build a GraphConvolutionAttention layer from a spec dict.

    layer_info carries 5 specs (6 when 'input_dim' is given explicitly);
    when 'input_dim' is absent, only layer 1 may inherit model.input_dim.
    Raises RuntimeError on a malformed spec.
    """
    if not 5 <= len(layer_info) <= 6:
        # Fixed: the message previously said 'GraphConvolution layer must
        # have 3-4 specs' — wrong layer name and counts for the check above.
        raise RuntimeError(
            'GraphConvolutionAttention layer must have 5-6 specs')
    input_dim = layer_info.get('input_dim')
    if not input_dim:
        if layer_id != 1:
            # Fixed: the original string had no '{}' placeholder, so
            # .format(layer_id) silently dropped the offending layer id.
            raise RuntimeError(
                'The input dim for layer {} must be specified'.format(
                    layer_id))
        input_dim = model.input_dim
    else:
        input_dim = int(input_dim)
    return GraphConvolutionAttention(
        input_dim=input_dim,
        output_dim=int(layer_info['output_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        sparse_inputs=parse_as_bool(layer_info['sparse_inputs']),
        act=create_activation(layer_info['act']),
        bias=parse_as_bool(layer_info['bias']),
        featureless=False,
        num_supports=1)


def create_Coarsening_layer(layer_info):
    """Build a Coarsening (graph pooling) layer from a 1-spec dict."""
    if len(layer_info) != 1:
        raise RuntimeError('Coarsening layer must have 1 spec')
    return Coarsening(pool_style=layer_info['pool_style'])


def create_Average_layer(layer_info):
    """Build an Average layer; it takes no specs."""
    if len(layer_info) != 0:
        raise RuntimeError('Average layer must have 0 specs')
    return Average()


def create_Attention_layer(layer_info):
    """Build an Attention layer from a 5-spec dict."""
    if len(layer_info) != 5:
        raise RuntimeError('Attention layer must have 5 specs')
    return Attention(input_dim=int(layer_info['input_dim']),
                     att_times=int(layer_info['att_times']),
                     att_num=int(layer_info['att_num']),
                     att_style=layer_info['att_style'],
                     att_weight=parse_as_bool(layer_info['att_weight']))


def create_Dot_layer(layer_info):
    """Build a Dot (inner-product) layer from a 1-spec dict."""
    if len(layer_info) != 1:
        # Fixed grammar: '1 specs' -> '1 spec' (matches Coarsening's message).
        raise RuntimeError('Dot layer must have 1 spec')
    return Dot(output_dim=int(layer_info['output_dim']))


def create_SLM_layer(layer_info):
    """Build an SLM layer from a 5-spec dict."""
    if len(layer_info) != 5:
        raise RuntimeError('SLM layer must have 5 specs')
    return SLM(
        input_dim=int(layer_info['input_dim']),
        output_dim=int(layer_info['output_dim']),
        act=create_activation(layer_info['act']),
        dropout=parse_as_bool(layer_info['dropout']),
        bias=parse_as_bool(layer_info['bias']))


def create_NTN_layer(layer_info):
    """Build an NTN (neural tensor network) layer from a 6-spec dict."""
    if len(layer_info) != 6:
        raise RuntimeError('NTN layer must have 6 specs')
    return NTN(
        input_dim=int(layer_info['input_dim']),
        feature_map_dim=int(layer_info['feature_map_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        inneract=create_activation(layer_info['inneract']),
        apply_u=parse_as_bool(layer_info['apply_u']),
        bias=parse_as_bool(layer_info['bias']))


def create_ANPM_layer(layer_info):
    """Build an ANPM layer from a 14-spec dict."""
    if len(layer_info) != 14:
        raise RuntimeError('ANPM layer must have 14 specs')
    return ANPM(
        input_dim=int(layer_info['input_dim']),
        att_times=int(layer_info['att_times']),
        att_num=int(layer_info['att_num']),
        att_style=layer_info['att_style'],
        att_weight=parse_as_bool(layer_info['att_weight']),
        feature_map_dim=int(layer_info['feature_map_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        bias=parse_as_bool(layer_info['bias']),
        ntn_inneract=create_activation(layer_info['ntn_inneract']),
        apply_u=parse_as_bool(layer_info['apply_u']),
        padding_value=int(layer_info['padding_value']),
        mne_inneract=create_activation(layer_info['mne_inneract']),
        # num_bins=int(layer_info['num_bins'])
        mne_method=layer_info['mne_method'],
        branch_style=layer_info['branch_style'])


def create_ANPMD_layer(layer_info):
    """Build an ANPMD layer (ANPM plus two dense heads) from a 22-spec dict."""
    if len(layer_info) != 22:
        raise RuntimeError('ANPMD layer must have 22 specs')
    return ANPMD(
        input_dim=int(layer_info['input_dim']),
        att_times=int(layer_info['att_times']),
        att_num=int(layer_info['att_num']),
        att_style=layer_info['att_style'],
        att_weight=parse_as_bool(layer_info['att_weight']),
        feature_map_dim=int(layer_info['feature_map_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        bias=parse_as_bool(layer_info['bias']),
        ntn_inneract=create_activation(layer_info['ntn_inneract']),
        apply_u=parse_as_bool(layer_info['apply_u']),
        padding_value=int(layer_info['padding_value']),
        mne_inneract=create_activation(layer_info['mne_inneract']),
        mne_method=layer_info['mne_method'],
        branch_style=layer_info['branch_style'],
        dense1_dropout=parse_as_bool(layer_info['dense1_dropout']),
        dense1_act=create_activation(layer_info['dense1_act']),
        dense1_bias=parse_as_bool(layer_info['dense1_bias']),
        dense1_output_dim=int(layer_info['dense1_output_dim']),
        dense2_dropout=parse_as_bool(layer_info['dense2_dropout']),
        dense2_act=create_activation(layer_info['dense2_act']),
        dense2_bias=parse_as_bool(layer_info['dense2_bias']),
        dense2_output_dim=int(layer_info['dense2_output_dim']))


def create_ANNH_layer(layer_info):
    """Build an ANNH layer from a 14-spec dict."""
    if len(layer_info) != 14:
        raise RuntimeError('ANNH layer must have 14 specs')
    return ANNH(
        input_dim=int(layer_info['input_dim']),
        att_times=int(layer_info['att_times']),
        att_num=int(layer_info['att_num']),
        att_style=layer_info['att_style'],
        att_weight=parse_as_bool(layer_info['att_weight']),
        feature_map_dim=int(layer_info['feature_map_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        bias=parse_as_bool(layer_info['bias']),
        ntn_inneract=create_activation(layer_info['ntn_inneract']),
        apply_u=parse_as_bool(layer_info['apply_u']),
        padding_value=int(layer_info['padding_value']),
        mne_inneract=create_activation(layer_info['mne_inneract']),
        # num_bins=int(layer_info['num_bins'])
        mne_method=layer_info['mne_method'],
        branch_style=layer_info['branch_style'])


def create_Dense_layer(layer_info):
    """Build a Dense (fully connected) layer from a 5-spec dict."""
    if len(layer_info) != 5:
        raise RuntimeError('Dense layer must have 5 specs')
    return Dense(
        input_dim=int(layer_info['input_dim']),
        output_dim=int(layer_info['output_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        act=create_activation(layer_info['act']),
        bias=parse_as_bool(layer_info['bias']))


def create_Padding_layer(layer_info):
    """Build a Padding layer from a 1-spec dict."""
    if len(layer_info) != 1:
        # Fixed grammar: '1 specs' -> '1 spec'.
        raise RuntimeError('Padding layer must have 1 spec')
    return Padding(
        padding_value=int(layer_info['padding_value']))


def create_MNE_layer(layer_info):
    """Build an MNE (multi-scale node embedding) layer from a 3-spec dict."""
    if len(layer_info) != 3:
        raise RuntimeError('MNE layer must have 3 specs')
    return MNE(
        input_dim=int(layer_info['input_dim']),
        dropout=parse_as_bool(layer_info['dropout']),
        inneract=create_activation(layer_info['inneract']))


def create_CNN_layer(layer_info):
    """Build a CNN layer from an 11-spec dict."""
    if len(layer_info) != 11:
        # Fixed: the message said '9 specs' while the check requires 11
        # (CNN takes 11 keyword arguments below).
        raise RuntimeError('CNN layer must have 11 specs')
    return CNN(
        start_cnn=parse_as_bool(layer_info['start_cnn']),
        end_cnn=parse_as_bool(layer_info['end_cnn']),
        window_size=int(layer_info['window_size']),
        kernel_stride=int(layer_info['kernel_stride']),
        in_channel=int(layer_info['in_channel']),
        out_channel=int(layer_info['out_channel']),
        padding=layer_info['padding'],
        pool_size=int(layer_info['pool_size']),
        dropout=parse_as_bool(layer_info['dropout']),
        act=create_activation(layer_info['act']),
        bias=parse_as_bool(layer_info['bias']))


def create_activation(act, sim_kernel=None, use_tf=True):
    """Map an activation name to a callable.

    Returns the TensorFlow op when use_tf is True, otherwise the NumPy
    fallback; 'sim_kernel' dispatches to the provided sim_kernel object.
    Raises RuntimeError for an unknown name.
    """
    if act == 'relu':
        return tf.nn.relu if use_tf else relu_np
    elif act == 'identity':
        return tf.identity if use_tf else identity_np
    elif act == 'sigmoid':
        return tf.sigmoid if use_tf else sigmoid_np
    elif act == 'tanh':
        return tf.tanh if use_tf else np.tanh
    elif act == 'sim_kernel':
        return sim_kernel.dist_to_sim_tf if use_tf else \
            sim_kernel.dist_to_sim_np
    else:
        raise RuntimeError('Unknown activation function {}'.format(act))


def relu_np(x):
    # [Excerpt truncated below: the relu_np body is cut off in the source.]
    ...

Full Screen

Full Screen

MRA_Model.py

Source:MRA_Model.py Github

copy

Full Screen

# NOTE(review): scraped excerpt of MRA_Model.py — the source site's own line
# numbers are fused into the text and create_model() is truncated at the end
# ("...97"), so this excerpt is not runnable as-is; kept byte-identical.
# What the visible code shows:
#   * create_activation() returns layers.ReLU().
#   * sigmoid_activation() is named "sigmoid" but returns layers.Softmax() —
#     looks like a naming/intent mismatch; TODO confirm against the caller
#     (its only use, at marker 96, is commented out).
#   * create_model() builds a U-Net-style encoder/decoder: Conv2D+ReLU pairs
#     with MaxPooling2D downsampling, UpSampling2D + tf.concat skip
#     connections on the way up, and two WaveTF ('db2') wavelet side branches
#     (dl1/dl2) computed on channel 0 and concatenated into the encoder.
#   * Input is fixed at batch_input_shape=(4,128,128,3); all Conv2D kernels
#     are 3x3, padding="same", random-normal init with stddev=0.01.
#   * The final model assembly (models.Model(...)) is commented out and the
#     function body is cut off before any return statement is visible.
...4import numpy as np5import cv26from tensorflow.keras import models,layers7from wavetf import WaveTFFactory8def create_activation():9 return layers.ReLU()10def sigmoid_activation():11 return layers.Softmax()12def create_model():13 input_1 = layers.Input(batch_input_shape=(4,128,128,3))14 enc_1 = layers.Conv2D(64,kernel_size=(3,3),padding="same",kernel_initializer = tf.random_normal_initializer(stddev=0.01))(input_1)15 enc_1 = create_activation()(enc_1)16 enc_1 = layers.Conv2D(64,kernel_size=(3,3),padding="same",kernel_initializer = tf.random_normal_initializer(stddev=0.01))(enc_1)17 enc_1 = create_activation()(enc_1) #This will be used in skip connection 118 maxpool_1 = layers.MaxPooling2D((2,2))(enc_1)19 ######################################20 '''21 Take the DL1 block here and concatenate with the downstream layer22 '''23 w = WaveTFFactory().build('db2', dim=2)24 dl1_input = w.call(tf.expand_dims(input_1[:,:,:,0],axis=-1)) #Calculate Wavelet Decomposition on only gray scale image25 dl1_enc = layers.Conv2D(64,kernel_size=(3,3),padding="same",kernel_initializer = tf.random_normal_initializer(stddev=0.01))(dl1_input)26 dl1_enc = create_activation()(dl1_enc)27 dl1_enc = layers.Conv2D(64,kernel_size=(3,3),padding="same",kernel_initializer = tf.random_normal_initializer(stddev=0.01))(dl1_enc)28 dl1_enc = create_activation()(dl1_enc)29 #Concatenate with main mode i.e output of maxpool_130 concat_1 = tf.concat([maxpool_1,dl1_enc],axis=-1)31 enc_2 = layers.Conv2D(128,kernel_size=(3,3),padding="same",kernel_initializer = tf.random_normal_initializer(stddev=0.01))(concat_1)32 enc_2 = create_activation()(enc_2)33 enc_2 = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(enc_2)34 enc_2 = create_activation()(enc_2) #This will be used in skip connection 235 maxpool_2 = layers.MaxPooling2D((2,2))(enc_2)36 ######################################37 '''38 Take the DL2 block here and concatenate with the downstream 
layer39 '''40 dl2_input = w.call(tf.expand_dims(dl1_input[:, :, :, 0], axis=-1)) # Calculate Wavelet Decomposition on only channel 0 of dl1_input41 dl2_enc = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dl2_input)42 dl2_enc = create_activation()(dl2_enc)43 dl2_enc = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dl2_enc)44 dl2_enc = create_activation()(dl2_enc)45 concat_2 = tf.concat([maxpool_2,dl2_enc],axis=-1)46 enc_3 = layers.Conv2D(256, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(concat_2)47 enc_3 = create_activation()(enc_3)48 enc_3 = layers.Conv2D(256, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(enc_3)49 enc_3 = create_activation()(enc_3) # This will be used in skip connection 350 maxpool_3 = layers.MaxPooling2D((2, 2))(enc_3)51 enc_4 = layers.Conv2D(512, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(maxpool_3)52 enc_4 = create_activation()(enc_4)53 enc_4 = layers.Conv2D(512, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(enc_4)54 enc_4 = create_activation()(enc_4) # This will be used in skip connection 455 maxpool_4 = layers.MaxPooling2D((2, 2))(enc_4)56 enc_5 = layers.Conv2D(1024, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(maxpool_4)57 enc_5 = create_activation()(enc_5)58 enc_5 = layers.Conv2D(1024, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(enc_5)59 enc_5 = create_activation()(enc_5)60 #####-> Downsampling Layer Ends, from now onwards it is upsampling side61 upsampling_1 = layers.UpSampling2D((2,2))(enc_5)62 dec_1 = layers.Conv2D(512,kernel_size=(3, 3), padding="same", 
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(upsampling_1)63 dec_1 = create_activation()(dec_1)64 concat_3 = tf.concat([enc_4,dec_1],axis = -1)65 dec_2 = layers.Conv2D(512, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(concat_3)66 dec_2 = create_activation()(dec_2)67 dec_2 = layers.Conv2D(512, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_2)68 dec_2 = create_activation()(dec_2)69 upsampling_2 = layers.UpSampling2D((2,2))(dec_2)70 dec_3 = layers.Conv2D(256, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(upsampling_2)71 dec_3 = create_activation()(dec_3)72 concat_4 = tf.concat([enc_3,dec_3],axis=-1)73 dec_4 = layers.Conv2D(256, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(concat_4)74 dec_4 = create_activation()(dec_4)75 dec_4 = layers.Conv2D(256, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_4)76 dec_4 = create_activation()(dec_4)77 upsampling_3 = layers.UpSampling2D((2, 2))(dec_4)78 dec_5 = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(upsampling_3)79 dec_5 = create_activation()(dec_5)80 concat_5 = tf.concat([enc_2,dec_5],axis=-1)81 dec_6 = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(concat_5)82 dec_6 = create_activation()(dec_6)83 dec_6 = layers.Conv2D(128, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_6)84 dec_6 = create_activation()(dec_6)85 upsampling_4 = layers.UpSampling2D((2, 2))(dec_6)86 dec_7 = layers.Conv2D(64, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(upsampling_4)87 dec_7 = create_activation()(dec_7)88 concat_6 = 
tf.concat([enc_1, dec_7], axis=-1)89 dec_8 = layers.Conv2D(64, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(concat_6)90 dec_8 = create_activation()(dec_8)91 dec_8 = layers.Conv2D(64, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_8)92 dec_8 = create_activation()(dec_8)93 dec_9 = layers.Conv2D(4, kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_8)94 dec_9 = create_activation()(dec_9)95 final_output = layers.Conv2D(4 , kernel_size=(3, 3), padding="same", kernel_initializer=tf.random_normal_initializer(stddev=0.01))(dec_9)96 #final_output = sigmoid_activation()(final_output)97 #model = models.Model(input_1,final_output)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation testing minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful