How to use the outputs method in LocalStack

Best Python code snippet using localstack_python

models.py

Source:models.py Github

copy

Full Screen

import tensorflow as tf
from ..utils.tf_utils import activation_fun


def _glorot_uniform():
    """Return the Xavier/Glorot-uniform initializer used by every layer here.

    VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform") is
    exactly Glorot-uniform; a factory is used so each layer gets its own
    initializer instance.
    """
    return tf.compat.v1.keras.initializers.VarianceScaling(
        scale=1.0, mode="fan_avg", distribution="uniform")


def _conv_relu(inputs, filters, name):
    """3x3 'same' convolution (with bias) followed by a ReLU."""
    outputs = tf.compat.v1.layers.conv2d(
        inputs, filters, 3, padding='same',
        kernel_initializer=_glorot_uniform(), name=name, use_bias=True)
    return tf.nn.relu(outputs)


def _downsample(inputs):
    """2x2 max-pool with stride 2 (halves the spatial resolution)."""
    return tf.compat.v1.layers.max_pooling2d(
        inputs, pool_size=2, strides=2, padding='same')


def _upsample(inputs, filters):
    """3x3 stride-2 transposed convolution (doubles the spatial resolution).

    Note: no activation is applied here; the caller decides (only the first
    decoder up-sample in UNET is followed by a ReLU).
    """
    return tf.compat.v1.layers.conv2d_transpose(
        inputs, filters, 3, strides=(2, 2), padding='same',
        kernel_initializer=_glorot_uniform(), use_bias=True)


def UNET(nb_classes, inputs):
    """Compile a UNET model.

    Args:
        nb_classes: the number of classes to predict
        inputs: the input tensor
    Returns:
        an output tensor, with 'nb_classes' of featuremaps
    """
    # ----- Encoder: pairs of conv+ReLU, each followed by a down-sample -----
    outputs = _conv_relu(inputs, 64, 'conv1-1')
    outputs = _conv_relu(outputs, 64, 'conv1-2')
    conv1_output = outputs          # skip connection 1
    outputs = _downsample(outputs)

    outputs = _conv_relu(outputs, 128, 'conv2-1')
    outputs = _conv_relu(outputs, 128, 'conv2-2')
    conv2_output = outputs          # skip connection 2
    outputs = _downsample(outputs)

    outputs = _conv_relu(outputs, 256, 'conv3-1')
    outputs = _conv_relu(outputs, 256, 'conv3-2')
    conv3_output = outputs          # skip connection 3
    outputs = _downsample(outputs)

    outputs = _conv_relu(outputs, 512, 'conv4-1')
    outputs = _conv_relu(outputs, 512, 'conv4-2')
    conv4_output = outputs          # skip connection 4 (see note below)
    outputs = _downsample(outputs)

    # Extracted feature map for an RPN head.
    # NOTE(review): the snippet is truncated, so whether rpn_feature is also
    # returned cannot be confirmed here -- verify against callers.
    rpn_feature = outputs

    # ----- Bottleneck -----
    outputs = _conv_relu(outputs, 1024, 'conv5-1')
    outputs = _conv_relu(outputs, 1024, 'conv5-2')

    # ----- Decoder -----
    # Up-sample 4: the only transposed conv that is followed by a ReLU.
    outputs = tf.nn.relu(_upsample(outputs, 512))
    # Changing the former line to the line below would connect the 4th layer
    # on both ends and form the classic U-Net architecture, but experiments
    # found this did not gain a better performance (thanks to Meryem Uzun-Per
    # for pointing this out).
    # outputs = tf.concat([conv4_output, outputs], 3)
    outputs = _conv_relu(outputs, 512, 'conv4-3')
    outputs = _conv_relu(outputs, 512, 'conv4-4')

    outputs = _upsample(outputs, 256)
    outputs = tf.concat([conv3_output, outputs], 3)
    outputs = _conv_relu(outputs, 256, 'conv3-3')
    outputs = _conv_relu(outputs, 256, 'conv3-4')

    outputs = _upsample(outputs, 128)
    outputs = tf.concat([conv2_output, outputs], 3)
    outputs = _conv_relu(outputs, 128, 'conv2-3')
    outputs = _conv_relu(outputs, 128, 'conv2-4')

    outputs = _upsample(outputs, 64)
    outputs = tf.concat([conv1_output, outputs], 3)
    outputs = _conv_relu(outputs, 64, 'conv1-3')
    outputs = _conv_relu(outputs, 64, 'conv1-4')

    # Only output 'nb_classes' featuremaps at the end; no bias and no
    # activation (logits are left to the loss).
    outputs = tf.compat.v1.layers.conv2d(
        outputs, nb_classes, 3, padding='same',
        kernel_initializer=_glorot_uniform(), name='final', use_bias=False)
    # The scraped source was truncated after the final conv; per the
    # docstring the function returns the class feature maps.
    return outputs

Full Screen

Full Screen

DCGAN.py

Source:DCGAN.py Github

copy

Full Screen

import torch
import torch.nn as nn


def _enc_block(in_channels, out_channels, kernel_size=4, stride=2):
    """Encoder stage: conv followed by LeakyReLU (padding fixed at 1)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                  stride=stride, padding=1),
        nn.LeakyReLU(inplace=False),
    )


def _dec_block(in_channels, out_channels, dropout=False):
    """Decoder stage: 2x-up transposed conv, ReLU, optional Dropout(0.5)."""
    layers = [
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4,
                           stride=2, padding=1),
        nn.ReLU(inplace=False),
    ]
    if dropout:
        layers.append(nn.Dropout(0.5))
    return nn.Sequential(*layers)


def _disc_block(in_channels, out_channels, kernel_size=4, stride=2):
    """Discriminator stage: conv, LeakyReLU, then BatchNorm (in that order)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                  stride=stride, padding=1),
        nn.LeakyReLU(inplace=False),
        nn.BatchNorm2d(out_channels),
    )


class Generator(nn.Module):
    """U-Net style generator: six encoder stages, five decoder stages with
    skip connections, mapping a 1-channel input to a 3-channel Tanh output.
    """

    def __init__(self):
        super(Generator, self).__init__()
        # First stage keeps the resolution (3x3, stride 1); the rest halve it.
        self.encoder_conv_1 = _enc_block(1, 64, kernel_size=3, stride=1)
        self.encoder_conv_2 = _enc_block(64, 128)
        self.encoder_conv_3 = _enc_block(128, 256)
        self.encoder_conv_4 = _enc_block(256, 512)
        self.encoder_conv_5 = _enc_block(512, 512)
        self.encoder_conv_6 = _enc_block(512, 512)
        # Decoder input widths double where a skip connection is concatenated.
        self.decoder_conv_1 = _dec_block(512, 512, dropout=True)
        self.decoder_conv_2 = _dec_block(1024, 512, dropout=True)
        self.decoder_conv_3 = _dec_block(1024, 256)
        self.decoder_conv_4 = _dec_block(512, 128)
        self.decoder_conv_5 = _dec_block(256, 64)
        self.output = nn.Sequential(
            nn.Conv2d(64, 3, kernel_size=1, stride=1, padding=0),
            nn.Tanh(),
        )

    def forward(self, inputs):
        """Run the encoder, then the decoder with channel-wise skip concats."""
        e1 = self.encoder_conv_1(inputs)
        e2 = self.encoder_conv_2(e1)
        e3 = self.encoder_conv_3(e2)
        e4 = self.encoder_conv_4(e3)
        e5 = self.encoder_conv_5(e4)
        e6 = self.encoder_conv_6(e5)
        d1 = self.decoder_conv_1(e6)
        d2 = self.decoder_conv_2(torch.cat([d1, e5], dim=1))
        d3 = self.decoder_conv_3(torch.cat([d2, e4], dim=1))
        d4 = self.decoder_conv_4(torch.cat([d3, e3], dim=1))
        d5 = self.decoder_conv_5(torch.cat([d4, e2], dim=1))
        return self.output(d5)


class Discriminator(nn.Module):
    """Convolutional discriminator over a 4-channel input (presumably image
    plus condition -- verify against the training loop); emits a raw,
    un-activated single-channel score."""

    def __init__(self):
        super(Discriminator, self).__init__()
        self.conv1 = _disc_block(4, 64)
        self.conv2 = _disc_block(64, 128)
        self.conv3 = _disc_block(128, 256)
        self.conv4 = _disc_block(256, 512)
        self.conv5 = _disc_block(512, 512, kernel_size=3, stride=1)
        # No sigmoid here; the loss is expected to handle the activation.
        self.output = nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0)

    def forward(self, inputs):
        """Run the five conv stages plus the scoring conv."""
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        outputs = self.conv3(outputs)
        outputs = self.conv4(outputs)
        outputs = self.conv5(outputs)
        outputs = self.output(outputs)
        # Fix: the scraped source was truncated and lost this return.
        return outputs

Full Screen

Full Screen

test_cleaner.py

Source:test_cleaner.py Github

copy

Full Screen

# coding: utf-8
import os
import shutil

from abiflows.core.mastermind_abc import Cleaner
from abiflows.core.testing import AbiflowsTest

# Location of shared test fixture files, four package levels above this module.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                        "test_files")


class TestCleaner(AbiflowsTest):
    """Unit tests for abiflows.core.mastermind_abc.Cleaner."""

    def test_cleaner(self):
        """Exercise Cleaner against a scratch directory tree.

        Builds a temporary tree of directories and empty files, runs two
        cleaners with different directory/glob-pattern specs, and checks
        that exactly the matching entries were removed.
        """
        # Remember the current working directory, then build and enter a
        # scratch directory so every relative path below is isolated.
        cwd = os.getcwd()
        tmp_dir = '_tmp_cleaner'
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
        os.makedirs(tmp_dir)
        os.chdir(tmp_dir)
        try:
            tmp_abs_dir = os.getcwd()

            # Lay out the directories and empty files the cleaners act on.
            for directory in ('outputs/formatted', 'outputs/text',
                              'results', 'temporary'):
                os.makedirs(directory)
            for filename in (
                    'somefile.txt', 'somefile.txt.backup',
                    'outputs/text/text1.abc', 'outputs/text/text2.abc',
                    'outputs/text/text3.abc', 'outputs/text/text1.def',
                    'outputs/text/text15.def',
                    'outputs/formatted/formatted1.txt',
                    'outputs/formatted/formatted2.txt',
                    'outputs/formatted/formatted3.log',
                    'outputs/formatted/formatted4.log',
                    'outputs/formatted/formatted5.log',
                    'outputs/formatted/formatted6.bin',
                    'outputs/formatted/formatted7.bog',
                    'outputs/formatted/formatted8.beg',
                    'temporary/item.log', 'temporary/result.txt'):
                open(filename, "w").close()

            # First cleaner: 'text?.abc' matches a single wildcard character,
            # so text1/2/3.abc go while text1.def and text15.def stay.
            cleaner1 = Cleaner(dirs_and_patterns=[{'directory': 'outputs/text',
                                                   'patterns': ['text?.abc']}])
            cleaner1.clean(root_directory=tmp_abs_dir)
            self.assertTrue(os.path.exists('outputs/text/text1.def'))
            self.assertTrue(os.path.exists('outputs/text/text15.def'))
            self.assertFalse(os.path.exists('outputs/text/text1.abc'))
            self.assertFalse(os.path.exists('outputs/text/text2.abc'))
            self.assertFalse(os.path.exists('outputs/text/text3.abc'))

            # Second cleaner: removes the whole 'temporary' directory plus
            # formatted[1-4].log and the '*.b?g' files, leaving the rest.
            cleaner2 = Cleaner(dirs_and_patterns=[
                {'directory': '.', 'patterns': ['temporary']},
                {'directory': 'outputs/formatted',
                 'patterns': ['*[1-4].log', '*.b?g']}])
            cleaner2.clean(root_directory=tmp_abs_dir)
            self.assertTrue(os.path.exists('outputs/formatted/formatted1.txt'))
            self.assertTrue(os.path.exists('outputs/formatted/formatted2.txt'))
            self.assertFalse(os.path.exists('outputs/formatted/formatted3.log'))
            self.assertFalse(os.path.exists('outputs/formatted/formatted4.log'))
            self.assertTrue(os.path.exists('outputs/formatted/formatted5.log'))
            self.assertTrue(os.path.exists('outputs/formatted/formatted6.bin'))
            self.assertFalse(os.path.exists('outputs/formatted/formatted7.bog'))
            self.assertFalse(os.path.exists('outputs/formatted/formatted8.beg'))
            self.assertFalse(os.path.exists('temporary'))
            self.assertTrue(os.path.exists('outputs/formatted'))
        finally:
            # Fix: restore the original working directory even when an
            # assertion fails, and remove the scratch tree (the truncated
            # source leaked it on every run).
            os.chdir(cwd)
            shutil.rmtree(tmp_dir, ignore_errors=True)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation testing minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful