Best Python code snippet using molotov_python
Param.py
Source:Param.py  
...
#end if"""
class Param(Element):
	"""The graphical parameter."""
	def __init__(self): Element.__init__(self)
	def get_input(self, *args, **kwargs):
		"""
		Get the graphical gtk class to represent this parameter.
		An enum requires a combo parameter.
		A non-enum with options gets a combined entry/combo parameter.
		All others get a standard entry parameter.
		@return gtk input class
		"""
		if self.is_enum(): return EnumParam(self, *args, **kwargs)
		if self.get_options(): return EnumEntryParam(self, *args, **kwargs)
		return EntryParam(self, *args, **kwargs)
	def get_markup(self):
		"""
		Get the markup for this param.
		@return a pango markup string...
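The docstring above describes a simple dispatch rule: enums get a combo widget, non-enums with options get a combined entry/combo widget, and everything else gets a plain entry widget. Below is a minimal, self-contained sketch of that rule; the stub class and string return values are placeholders for the real GRC/GTK classes (EnumParam, EnumEntryParam, EntryParam) and are illustrative only.

# Illustrative sketch of the get_input() dispatch rule; StubParam is not part
# of GRC, and the strings stand in for the real GTK input classes.
class StubParam:
    def __init__(self, enum=False, options=None):
        self._enum = enum
        self._options = options or []
    def is_enum(self): return self._enum
    def get_options(self): return self._options

def pick_input_class(param):
    if param.is_enum(): return "EnumParam"           # fixed choices -> combo box
    if param.get_options(): return "EnumEntryParam"  # suggestions -> entry + combo
    return "EntryParam"                              # free-form -> plain entry

print(pick_input_class(StubParam(enum=True, options=["a", "b"])))  # EnumParam
print(pick_input_class(StubParam(options=["a", "b"])))             # EnumEntryParam
print(pick_input_class(StubParam()))                               # EntryParam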
resnet.py
Source:resnet.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import numpy as np
from .layers import *

# Constructs a graph of resnet block.
# Default input is channels last.
def resnet_block(_input,
                 isTraining=None,
                 channel=128,
                 require_bn=False,
                 require_in=False,
                 dilation_rate=(1,1),
                 kernel_size=(3,3),
                 data_format="channels_last",
                 activation=tf.nn.elu):

    # Figuring out channel size
    if channel%2 != 0:
        print("Even number channels are required.")
        return -1
    down_channel = int(channel/2)

    # Figuring out bn axis
    if data_format=="channels_first":
        bn_axis = 1
    elif data_format=="channels_last":
        bn_axis = 3

    ##############################################
    # Full pre-activation style:                 #
    # bn -> elu -> conv2d -> bn -> elu -> conv2d #
    # bn -> elu -> conv2d                        #
    ##############################################

    if require_bn: _input = tf.layers.batch_normalization(_input,
                                                          training=isTraining,
                                                          center=True,
                                                          scale=True,
                                                          axis=bn_axis)
    if require_in: _input = tf.contrib.layers.instance_norm(_input)

    _input = activation(_input)
    _input = tf.layers.conv2d(_input,
                              filters=down_channel,
                              kernel_size=(1,1),
                              data_format=data_format)

    if require_bn: _input = tf.layers.batch_normalization(_input,
                                                          training=isTraining,
                                                          center=True,
                                                          scale=True,
                                                          axis=bn_axis)
    if require_in: _input = tf.contrib.layers.instance_norm(_input)

    _input = activation(_input)
    _input = tf.layers.conv2d(_input,
                              filters=down_channel,
                              kernel_size=kernel_size,
                              dilation_rate=dilation_rate,
                              data_format=data_format,
                              padding="same")

    if require_bn: _input = tf.layers.batch_normalization(_input,
                                                          training=isTraining,
                                                          center=True,
                                                          scale=True,
                                                          axis=bn_axis)
    if require_in: _input = tf.contrib.layers.instance_norm(_input)

    _input = activation(_input)
    _input = tf.layers.conv2d(_input,
                              filters=channel,
                              kernel_size=(1,1),
                              data_format=data_format)
    return _input

# Creates a resnet architecture.
def build_resnet(_input,
                 channel,
                 num_chunks,
                 isTraining=None,
                 require_bn=False,   # Batchnorm flag
                 require_in=False,   # Instancenorm flag
                 data_format="channels_last",
                 first_projection=True,
                 no_last_dilation=False,
                 transpose_matrix=False,
                 dilation_cycle=[1,2,4,8]):

    # Projection of the very first input to 128 channels.
    if first_projection:
        _input = tf.layers.conv2d(_input,
                                  filters=channel,
                                  kernel_size=(1,1),
                                  dilation_rate=(1,1),
                                  data_format=data_format)

    # Each chunk contains 4 blocks with cycling dilation rates.
    for i in range(num_chunks):
        # dilation rates
        for dr in dilation_cycle:
            # save residual connection
            _residual = _input
            # pass through resnet block
            _conved = resnet_block(_input,
                                   isTraining=isTraining,
                                   require_bn=require_bn,
                                   require_in=require_in,
                                   channel=channel,
                                   dilation_rate=(dr, dr),
                                   data_format=data_format)
            # generate input to the next block
            _input = _residual+_conved
            if transpose_matrix:
                _input = (_input+tf.transpose(_input, [0,2,1,3]))/2

    # 2 more extra blocks without dilation
    if no_last_dilation:
        for i in range(2):
            _residual = _input
            # pass through resnet block
            _conved = resnet_block(_input,
                                   isTraining=isTraining,
                                   require_bn=require_bn,
                                   require_in=require_in,
                                   channel=channel,
                                   dilation_rate=(1, 1),
                                   data_format=data_format)
            # generate input to the next block
            _input = _residual+_conved
            if transpose_matrix:
                _input = (_input+tf.transpose(_input, [0,2,1,3]))/2
            ...
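This snippet uses the TensorFlow 1.x layer API (tf.layers, tf.contrib), so a graph-mode call is the natural way to exercise it. The sketch below is one way it might be wired up; the import path and the 64x64x64 input shape are placeholders, not taken from the original repository.

# Usage sketch, TensorFlow 1.x graph mode assumed; the input shape and the
# import path are illustrative assumptions.
import tensorflow as tf
from resnet import build_resnet  # hypothetical import path

features = tf.placeholder(tf.float32, shape=[None, 64, 64, 64])  # NHWC input
is_training = tf.placeholder(tf.bool, shape=[])

# Each chunk is 4 residual blocks cycling through dilation rates 1, 2, 4, 8;
# the initial 1x1 projection brings the input up to `channel` feature maps.
output = build_resnet(features,
                      channel=128,
                      num_chunks=2,
                      isTraining=is_training,
                      require_bn=True,
                      data_format="channels_last")
print(output.shape)  # (?, 64, 64, 128): spatial size preserved, 128 channels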
perceptron.py
Source:perceptron.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul  7 10:44:16 2021
@author: axelt
"""
import numpy as np

def activationFunction_sigmoid(value):
    return 1.0/(1+np.e**(-1*value))

class perceptron(object):
    ACTIVATIONFUNCTIONS = {"sigmoid": activationFunction_sigmoid}

    def __init__(self, weights, _activationFunction="sigmoid"):
        self.activationFunction = perceptron.ACTIVATIONFUNCTIONS[_activationFunction]
        self.weights = weights

    def process(self, _input):
        # h = x_1 * w_1 + ... + x_n * w_n
        total = np.dot(_input, self.weights)
        # the dot product can also be expressed as:
        # total = sum(_input*self.weights)
        return self.activationFunction(total)

    def update(self, _input, target, learningRate=0.4):
        # update rule: w_i = w_i + learning rate * (target-output)*x_i
        for i in range(0, len(self.weights)):
            self.weights[i] = self.weights[i]+learningRate*(target-self.process(_input))*_input[i]

if __name__ == "__main__":
    _input = np.array([5,6,7,8,9], dtype="float")
    target = 1
    _input = _input/sum(_input)
    weights = np.random.rand(len(_input))
    print("Initial Weights", weights)
    print("Normalised Input", _input)
    p = perceptron(weights)

    output = p.process(_input)
    print("Initial Output is ", output)

    GENERATIONS = 1000
    LEARNINGRATE = 0.3

    for i in range(0, GENERATIONS):
        p.update(_input, target, LEARNINGRATE)
    output = p.process(_input)
    print("Trained Output is ", output)
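The update rule stated in the comment, w_i = w_i + learning rate * (target - output) * x_i, can be checked by hand. The sketch below works through one step with made-up numbers; note that the class recomputes the output inside its loop after each weight change, so its intermediate values differ slightly from this vectorised version.

# One hand-checked update step with illustrative numbers (not from the script).
import numpy as np

x = np.array([0.2, 0.8])
w = np.array([0.5, 0.5])
target, lr = 1.0, 0.4

output = 1.0 / (1.0 + np.exp(-np.dot(x, w)))  # sigmoid(0.5) ~= 0.6225
w = w + lr * (target - output) * x            # each w_i moves toward the target
print(output)  # ~0.6225
print(w)       # ~[0.5302, 0.6208]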