How to use the list_layers method in localstack

Best Python code snippet using localstack_python
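In AWS terms, list_layers is the Lambda API that lists the Lambda layers available in an account; against LocalStack you would normally call it through boto3 with the client pointed at the local endpoint. Below is a minimal sketch, assuming LocalStack's default edge port (4566) and dummy credentials, both of which you should adjust to your setup. Note that the snippets that follow are unrelated to this API: they use list_layers simply as a parameter name for a plain Python list of layers.

import boto3

# Point the Lambda client at LocalStack instead of AWS. The endpoint URL,
# region, and dummy credentials below are the usual LocalStack defaults
# (assumptions, not taken from the snippets on this page).
lambda_client = boto3.client(
    "lambda",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# ListLayers returns a paginated {"Layers": [...]} response.
response = lambda_client.list_layers()
for layer in response.get("Layers", []):
    print(layer["LayerName"], layer["LatestMatchingVersion"]["LayerVersionArn"])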

melgan.py

Source: melgan.py (GitHub)


# -*- coding: utf-8 -*-
# Copyright 2020 The MelGAN Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MelGAN Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.utils import GroupConv1D, WeightNormalization


def get_initializer(initializer_seed=42):
    """Creates a `tf.initializers.glorot_normal` with the given seed.
    Args:
        initializer_seed: int, initializer seed.
    Returns:
        GlorotNormal initializer with seed = `initializer_seed`.
    """
    return tf.keras.initializers.GlorotNormal(seed=initializer_seed)


class TFReflectionPad1d(tf.keras.layers.Layer):
    """Tensorflow ReflectionPad1d module."""

    def __init__(self, padding_size, padding_type="REFLECT", **kwargs):
        """Initialize TFReflectionPad1d module.
        Args:
            padding_size (int)
            padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")
        """
        super().__init__(**kwargs)
        self.padding_size = padding_size
        self.padding_type = padding_type

    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Padded tensor (B, T + 2 * padding_size, C).
        """
        return tf.pad(
            x,
            [[0, 0], [self.padding_size, self.padding_size], [0, 0]],
            self.padding_type,
        )


class TFConvTranspose1d(tf.keras.layers.Layer):
    """Tensorflow ConvTranspose1d module."""

    def __init__(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFConvTranspose1d module.
        Args:
            filters (int): Number of filters.
            kernel_size (int): Kernel size.
            strides (int): Stride width.
            padding (str): Padding type ("same" or "valid").
        """
        super().__init__(**kwargs)
        self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
            filters=filters,
            kernel_size=(kernel_size, 1),
            strides=(strides, 1),
            padding="same",
            kernel_initializer=get_initializer(initializer_seed),
        )
        if is_weight_norm:
            self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)

    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T', C').
        """
        x = tf.expand_dims(x, 2)
        x = self.conv1d_transpose(x)
        x = tf.squeeze(x, 2)
        return x


class TFResidualStack(tf.keras.layers.Layer):
    """Tensorflow ResidualStack module."""

    def __init__(
        self,
        kernel_size,
        filters,
        dilation_rate,
        use_bias,
        nonlinear_activation,
        nonlinear_activation_params,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFResidualStack module.
        Args:
            kernel_size (int): Kernel size.
            filters (int): Number of filters.
            dilation_rate (int): Dilation rate.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
        """
        super().__init__(**kwargs)
        self.blocks = [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=kernel_size,
                dilation_rate=dilation_rate,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=1,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
        ]
        self.shortcut = tf.keras.layers.Conv1D(
            filters=filters,
            kernel_size=1,
            use_bias=use_bias,
            kernel_initializer=get_initializer(initializer_seed),
            name="shortcut",
        )

        # apply weightnorm
        if is_weight_norm:
            self._apply_weightnorm(self.blocks)
            self.shortcut = WeightNormalization(self.shortcut)

    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T, C).
        """
        _x = tf.identity(x)
        for layer in self.blocks:
            _x = layer(_x)
        shortcut = self.shortcut(x)
        return shortcut + _x

    def _apply_weightnorm(self, list_layers):
        """Try to apply weightnorm to every layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass


class TFMelGANGenerator(tf.keras.Model):
    """Tensorflow MelGAN generator module."""

    def __init__(self, config, **kwargs):
        """Initialize TFMelGANGenerator module.
        Args:
            config: config object of Melgan generator.
        """
        super().__init__(**kwargs)

        # check hyper parameter is valid or not
        assert config.filters >= np.prod(config.upsample_scales)
        assert config.filters % (2 ** len(config.upsample_scales)) == 0

        # add initial layer
        layers = []
        layers += [
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="first_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.filters,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
            ),
        ]

        for i, upsample_scale in enumerate(config.upsample_scales):
            # add upsampling layer
            layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    filters=config.filters // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    strides=upsample_scale,
                    padding="same",
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="conv_transpose_._{}".format(i),
                ),
            ]

            # add residual stack layer
            for j in range(config.stacks):
                layers += [
                    TFResidualStack(
                        kernel_size=config.stack_kernel_size,
                        filters=config.filters // (2 ** (i + 1)),
                        dilation_rate=config.stack_kernel_size ** j,
                        use_bias=config.use_bias,
                        nonlinear_activation=config.nonlinear_activation,
                        nonlinear_activation_params=config.nonlinear_activation_params,
                        is_weight_norm=config.is_weight_norm,
                        initializer_seed=config.initializer_seed,
                        name="residual_stack_._{}._._{}".format(i, j),
                    )
                ]

        # add final layer
        layers += [
            getattr(tf.keras.layers, config.nonlinear_activation)(
                **config.nonlinear_activation_params
            ),
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="last_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
            ),
        ]
        if config.use_final_nolinear_activation:
            layers += [tf.keras.layers.Activation("tanh")]

        if config.is_weight_norm is True:
            self._apply_weightnorm(layers)

        self.melgan = tf.keras.models.Sequential(layers)

    def call(self, mels, **kwargs):
        """Calculate forward propagation.
        Args:
            mels (Tensor): Input tensor (B, T, channels)
        Returns:
            Tensor: Output tensor (B, T * prod(upsample_scales), out_channels)
        """
        return self.inference(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        return self.melgan(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        return self.melgan(mels)

    def _apply_weightnorm(self, list_layers):
        """Try to apply weightnorm to every layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass

    def _build(self):
        """Build model by passing fake input."""
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        self(fake_mels)


class TFMelGANDiscriminator(tf.keras.layers.Layer):
    """Tensorflow MelGAN discriminator module."""

    def __init__(
        self,
        out_channels=1,
        kernel_sizes=[5, 3],
        filters=16,
        max_downsample_filters=1024,
        use_bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        is_weight_norm=True,
        initializer_seed=0.02,
        **kwargs
    ):
        """Initialize MelGAN discriminator module.
        Args:
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of two kernel sizes. Their product is used
                for the kernel size of the first conv layer, and the first and
                second kernel sizes are used for the last two layers. For
                example, if kernel_sizes = [5, 3], the first layer's kernel
                size is 5 * 3 = 15 and the last two layers' kernel sizes are
                5 and 3, respectively.
            filters (int): Initial number of filters for conv layer.
            max_downsample_filters (int): Maximum number of filters for downsampling layers.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            padding_type (str): Padding type (supports only "REFLECT", "CONSTANT", "SYMMETRIC")
        """
        super().__init__(**kwargs)
        discriminator = []

        # check kernel_size is valid
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1

        # add first layer
        discriminator = [
            TFReflectionPad1d(
                (np.prod(kernel_sizes) - 1) // 2, padding_type=padding_type
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=int(np.prod(kernel_sizes)),
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
        ]

        # add downsample layers
        in_chs = filters
        with tf.keras.utils.CustomObjectScope({"GroupConv1D": GroupConv1D}):
            for downsample_scale in downsample_scales:
                out_chs = min(in_chs * downsample_scale, max_downsample_filters)
                discriminator += [
                    GroupConv1D(
                        filters=out_chs,
                        kernel_size=downsample_scale * 10 + 1,
                        strides=downsample_scale,
                        padding="same",
                        use_bias=use_bias,
                        groups=in_chs // 4,
                        kernel_initializer=get_initializer(initializer_seed),
                    )
                ]
                discriminator += [
                    getattr(tf.keras.layers, nonlinear_activation)(
                        **nonlinear_activation_params
                    )
                ]
                in_chs = out_chs

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_filters)
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_chs,
                kernel_size=kernel_sizes[0],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]
        discriminator += [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            )
        ]
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_channels,
                kernel_size=kernel_sizes[1],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]

        if is_weight_norm is True:
            self._apply_weightnorm(discriminator)

        self.discriminator = discriminator

    def call(self, x, **kwargs):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of output tensors of each layer.
        """
        outs = []
        for f in self.discriminator:
            x = f(x)
            outs += [x]
        return outs

    def _apply_weightnorm(self, list_layers):
        """Try to apply weightnorm to every layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass


class TFMelGANMultiScaleDiscriminator(tf.keras.Model):
    """MelGAN multi-scale discriminator module."""

    def __init__(self, config, **kwargs):
        """Initialize MelGAN multi-scale discriminator module.
        Args:
            config: config object for melgan discriminator
        """
        super().__init__(**kwargs)
        self.discriminator = []

        # add discriminator
        for i in range(config.scales):
            self.discriminator += [
                TFMelGANDiscriminator(
                    out_channels=config.out_channels,
                    kernel_sizes=config.kernel_sizes,
                    filters=config.filters,
                    max_downsample_filters=config.max_downsample_filters,
                    use_bias=config.use_bias,
                    downsample_scales=config.downsample_scales,
                    nonlinear_activation=config.nonlinear_activation,
                    nonlinear_activation_params=config.nonlinear_activation_params,
                    padding_type=config.padding_type,
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="melgan_discriminator_scale_._{}".format(i),
                )
            ]
        self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(
            **config.downsample_pooling_params
        )

    def call(self, x, **kwargs):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of each discriminator's outputs, each of which consists of that discriminator's per-layer output tensors.
        """
        outs = []
        for f in self.discriminator:
            outs += [f(x)]
            x = self.pooling(x)
            ...
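In melgan.py above, list_layers is simply the argument of the _apply_weightnorm helper: it walks a Python list of Keras layers and replaces every Conv1D/Dense layer with a weight-normalized wrapper in place. Below is a standalone sketch of the same pattern, using tensorflow_addons.layers.WeightNormalization as a stand-in for the tensorflow_tts.utils wrapper imported above (an assumption; it is used the same way here, wrapping a layer instance).

import tensorflow as tf
from tensorflow_addons.layers import WeightNormalization  # stand-in wrapper

def apply_weightnorm(list_layers):
    """Wrap every Conv1D/Dense layer in list_layers with weight normalization."""
    for i in range(len(list_layers)):
        # Default Keras layer names contain "conv1d" / "dense", which is what
        # the snippet above keys on.
        layer_name = list_layers[i].name.lower()
        if "conv1d" in layer_name or "dense" in layer_name:
            list_layers[i] = WeightNormalization(list_layers[i])

layers = [
    tf.keras.layers.Conv1D(filters=64, kernel_size=3),
    tf.keras.layers.LeakyReLU(alpha=0.2),
    tf.keras.layers.Dense(units=10),
]
apply_weightnorm(layers)  # layers[0] and layers[2] are now wrapped in place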


particle.py

Source: particle.py (GitHub)


# Copyright (c) 2020 Francisco Erivaldo Fernandes Junio
# Licensed under MIT License
import numpy as np
from copy import deepcopy
import utils
import keras.backend
from keras.models import Model, Sequential
from keras.layers import Input, Add, Dense, Dropout, Flatten
from keras.layers import Activation, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras import regularizers
from keras.optimizers import Adam, Nadam
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
import os
import tensorflow as tf

# Hide Tensorflow INFOs and WARNINGs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


class Particle:
    def __init__(self, min_layer, max_layer, max_pool_layers, input_width, input_height, input_channels, \
        conv_prob, pool_prob, fc_prob, max_conv_kernel, max_out_ch, max_fc_neurons, output_dim):
        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels

        self.num_pool_layers = 0
        self.max_pool_layers = max_pool_layers

        self.feature_width = input_width
        self.feature_height = input_height

        self.depth = np.random.randint(min_layer, max_layer)

        self.conv_prob = conv_prob
        self.pool_prob = pool_prob
        self.fc_prob = fc_prob

        self.max_conv_kernel = max_conv_kernel
        self.max_out_ch = max_out_ch

        self.max_fc_neurons = max_fc_neurons
        self.output_dim = output_dim

        self.layers = []
        self.acc = None
        self.vel = []  # Initial velocity
        self.pBest = []

        # Build particle architecture
        self.initialization()

        # Update initial velocity
        for i in range(len(self.layers)):
            if self.layers[i]["type"] != "fc":
                self.vel.append({"type": "keep"})
            else:
                self.vel.append({"type": "keep_fc"})

        self.model = None
        self.pBest = deepcopy(self)

    def __str__(self):
        string = ""
        for z in range(len(self.layers)):
            string = string + self.layers[z]["type"] + " | "
        return string

    def initialization(self):
        out_channel = np.random.randint(3, self.max_out_ch)
        conv_kernel = np.random.randint(3, self.max_conv_kernel)

        # First layer is always a convolution layer
        self.layers.append({"type": "conv", "ou_c": out_channel, "kernel": conv_kernel})

        conv_prob = self.conv_prob
        pool_prob = conv_prob + self.pool_prob
        fc_prob = pool_prob

        for i in range(1, self.depth):
            if self.layers[-1]["type"] == "fc":
                layer_type = 1.1
            else:
                layer_type = np.random.rand()

            if layer_type < conv_prob:
                self.layers = utils.add_conv(self.layers, self.max_out_ch, self.max_conv_kernel)
            elif layer_type >= conv_prob and layer_type <= pool_prob:
                self.layers, self.num_pool_layers = utils.add_pool(self.layers, self.fc_prob, self.num_pool_layers, self.max_pool_layers, self.max_out_ch, self.max_conv_kernel, self.max_fc_neurons, self.output_dim)
            elif layer_type >= fc_prob:
                self.layers = utils.add_fc(self.layers, self.max_fc_neurons)

        self.layers[-1] = {"type": "fc", "ou_c": self.output_dim, "kernel": -1}

    def velocity(self, gBest, Cg):
        self.vel = utils.computeVelocity(gBest, self.pBest.layers, self.layers, Cg)

    def update(self):
        new_p = utils.updateParticle(self.layers, self.vel)
        new_p = self.validate(new_p)

        self.layers = new_p
        self.model = None

    def validate(self, list_layers):
        # Last layer should always be an fc layer with number of neurons equal to the number of outputs
        list_layers[-1] = {"type": "fc", "ou_c": self.output_dim, "kernel": -1}

        # Remove excess pooling layers
        self.num_pool_layers = 0
        for i in range(len(list_layers)):
            if list_layers[i]["type"] == "max_pool" or list_layers[i]["type"] == "avg_pool":
                self.num_pool_layers += 1

                if self.num_pool_layers >= self.max_pool_layers:
                    list_layers[i]["type"] = "remove"

        # Now, fix the inputs of each conv and pool layer
        updated_list_layers = []

        for i in range(0, len(list_layers)):
            if list_layers[i]["type"] != "remove":
                if list_layers[i]["type"] == "conv":
                    updated_list_layers.append({"type": "conv", "ou_c": list_layers[i]["ou_c"], "kernel": list_layers[i]["kernel"]})

                if list_layers[i]["type"] == "fc":
                    updated_list_layers.append(list_layers[i])

                if list_layers[i]["type"] == "max_pool":
                    updated_list_layers.append({"type": "max_pool", "ou_c": -1, "kernel": 2})

                if list_layers[i]["type"] == "avg_pool":
                    updated_list_layers.append({"type": "avg_pool", "ou_c": -1, "kernel": 2})

        return updated_list_layers

    ##### Model methods #####
    def model_compile(self, dropout_rate):
        list_layers = self.layers
        self.model = Sequential()

        for i in range(len(list_layers)):
            if list_layers[i]["type"] == "conv":
                n_out_filters = list_layers[i]["ou_c"]
                kernel_size = list_layers[i]["kernel"]

                if i == 0:
                    in_w = self.input_width
                    in_h = self.input_height
                    in_c = self.input_channels
                    self.model.add(Conv2D(n_out_filters, kernel_size, strides=(1, 1), padding="same", data_format="channels_last", kernel_initializer='he_normal', bias_initializer='he_normal', activation=None, input_shape=(in_w, in_h, in_c)))
                    self.model.add(BatchNormalization())
                    self.model.add(Activation("relu"))
                else:
                    self.model.add(Dropout(dropout_rate))
                    self.model.add(Conv2D(n_out_filters, kernel_size, strides=(1, 1), padding="same", kernel_initializer='he_normal', bias_initializer='he_normal', activation=None))
                    self.model.add(BatchNormalization())
                    self.model.add(Activation("relu"))

            if list_layers[i]["type"] == "max_pool":
                kernel_size = list_layers[i]["kernel"]
                self.model.add(MaxPooling2D(pool_size=(3, 3), strides=2))

            if list_layers[i]["type"] == "avg_pool":
                kernel_size = list_layers[i]["kernel"]
                self.model.add(AveragePooling2D(pool_size=(3, 3), strides=2))

            if list_layers[i]["type"] == "fc":
                if list_layers[i-1]["type"] != "fc":
                    self.model.add(Flatten())
                self.model.add(Dropout(dropout_rate))

                if i == len(list_layers) - 1:
                    self.model.add(Dense(list_layers[i]["ou_c"], kernel_initializer='he_normal', bias_initializer='he_normal', activation=None))
                    self.model.add(BatchNormalization())
                    self.model.add(Activation("softmax"))
                else:
                    self.model.add(Dense(list_layers[i]["ou_c"], kernel_initializer='he_normal', bias_initializer='he_normal', kernel_regularizer=regularizers.l2(0.01), activation=None))
                    self.model.add(BatchNormalization())
                    self.model.add(Activation("relu"))

        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.0)
        self.model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])

    def model_fit(self, x_train, y_train, batch_size, epochs):
        # TODO: add option to only use a sample size of the dataset
        hist = self.model.fit(x=x_train, y=y_train, validation_split=0.0, batch_size=batch_size, epochs=epochs, verbose=2)
        return hist

    def model_fit_complete(self, x_train, y_train, batch_size, epochs):
        hist = self.model.fit(x=x_train, y=y_train, validation_split=0.0, batch_size=batch_size, epochs=epochs, verbose=2)
        return hist

    def model_delete(self):
        # This is used to free up memory during PSO training
        del self.model
        keras.backend.clear_session()
        tf.reset_default_graph()
        ...
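In particle.py, list_layers is again a plain Python list, this time of layer-description dicts that validate() normalizes after each PSO update: the final layer is forced back to a fully connected output layer, pooling layers beyond the budget are dropped, and conv/pool entries are rewritten in a canonical form. Below is a free-function sketch of that logic, runnable without the Particle class or the utils module (the max_pool_layers and output_dim values in the usage line are hypothetical examples).

from copy import deepcopy

def validate(list_layers, max_pool_layers, output_dim):
    """Sketch of Particle.validate: cap pooling layers, fix the output layer."""
    list_layers = deepcopy(list_layers)
    # The last layer must be a fully connected layer sized to the output.
    list_layers[-1] = {"type": "fc", "ou_c": output_dim, "kernel": -1}
    # Mark excess pooling layers for removal (same counting as the original).
    num_pool = 0
    for layer in list_layers:
        if layer["type"] in ("max_pool", "avg_pool"):
            num_pool += 1
            if num_pool >= max_pool_layers:
                layer["type"] = "remove"
    # Rebuild the list, normalizing conv/pool entries and dropping removals.
    updated = []
    for layer in list_layers:
        if layer["type"] == "conv":
            updated.append({"type": "conv", "ou_c": layer["ou_c"], "kernel": layer["kernel"]})
        elif layer["type"] in ("max_pool", "avg_pool"):
            updated.append({"type": layer["type"], "ou_c": -1, "kernel": 2})
        elif layer["type"] == "fc":
            updated.append(layer)
    return updated

layers = [{"type": "conv", "ou_c": 32, "kernel": 3},
          {"type": "max_pool", "ou_c": -1, "kernel": 2},
          {"type": "fc", "ou_c": 128, "kernel": -1}]
print(validate(layers, max_pool_layers=2, output_dim=10))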


