How to use the hidden method in Lemoncheesecake

Best Python code snippets using lemoncheesecake
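In lemoncheesecake, a Python functional testing framework, "hidden" usually refers to the @lcc.hidden() decorator, which keeps a test or suite out of the generated test tree (for example, a test still under construction, or a shared base suite class). Below is a minimal sketch, assuming the @lcc.hidden() decorator and the lemoncheesecake.matching helpers behave as in recent lemoncheesecake releases; the suite and test names are illustrative.

import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to

@lcc.suite("my_suite")
class my_suite:
    @lcc.test("Visible test")
    def visible_test(self):
        # Appears in the test tree and in reports as usual.
        check_that("1 + 1", 1 + 1, equal_to(2))

    @lcc.test("Hidden test")
    @lcc.hidden()
    def hidden_test(self):
        # Kept out of the test tree, e.g. while still work in progress.
        lcc.log_info("this test is not listed in the test tree")

With a project laid out this way, running lcc run should execute only visible_test; hidden_test is neither listed nor run. The snippets below were matched to this page by keyword and come from unrelated projects.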

stt_layer_lstm.py

Source: stt_layer_lstm.py (GitHub)



# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# pylint:skip-file
from collections import namedtuple

import mxnet as mx

from stt_layer_batchnorm import batchnorm

LSTMState = namedtuple("LSTMState", ["c", "h"])
LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
                                     "h2h_weight", "h2h_bias",
                                     "ph2h_weight",
                                     "c2i_bias", "c2f_bias", "c2o_bias"])
LSTMModel = namedtuple("LSTMModel", ["rnn_exec", "symbol",
                                     "init_states", "last_states",
                                     "seq_data", "seq_labels", "seq_outputs",
                                     "param_blocks"])


def vanilla_lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, is_batchnorm=False, gamma=None, beta=None, name=None):
    """LSTM Cell symbol"""
    i2h = mx.sym.FullyConnected(data=indata,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    if is_batchnorm:
        if name is not None:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
        else:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
    h2h = mx.sym.FullyConnected(data=prev_state.h,
                                weight=param.h2h_weight,
                                bias=param.h2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_h2h" % (seqidx, layeridx))
    gates = i2h + h2h
    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
                                      name="t%d_l%d_slice" % (seqidx, layeridx))
    in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
    forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
    out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
    next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
    return LSTMState(c=next_c, h=next_h)


def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., num_hidden_proj=0, is_batchnorm=False,
         gamma=None, beta=None, name=None):
    """LSTM Cell symbol"""
    # dropout input
    if dropout > 0.:
        indata = mx.sym.Dropout(data=indata, p=dropout)
    i2h = mx.sym.FullyConnected(data=indata,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    if is_batchnorm:
        if name is not None:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
        else:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
    h2h = mx.sym.FullyConnected(data=prev_state.h,
                                weight=param.h2h_weight,
                                # bias=param.h2h_bias,
                                no_bias=True,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_h2h" % (seqidx, layeridx))
    gates = i2h + h2h
    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
                                      name="t%d_l%d_slice" % (seqidx, layeridx))
    Wcidc = mx.sym.broadcast_mul(param.c2i_bias, prev_state.c) + slice_gates[0]
    in_gate = mx.sym.Activation(Wcidc, act_type="sigmoid")
    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
    Wcfdc = mx.sym.broadcast_mul(param.c2f_bias, prev_state.c) + slice_gates[2]
    forget_gate = mx.sym.Activation(Wcfdc, act_type="sigmoid")
    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
    Wcoct = mx.sym.broadcast_mul(param.c2o_bias, next_c) + slice_gates[3]
    out_gate = mx.sym.Activation(Wcoct, act_type="sigmoid")
    next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
    if num_hidden_proj > 0:
        proj_next_h = mx.sym.FullyConnected(data=next_h,
                                            weight=param.ph2h_weight,
                                            no_bias=True,
                                            num_hidden=num_hidden_proj,
                                            name="t%d_l%d_ph2h" % (seqidx, layeridx))
        return LSTMState(c=next_c, h=proj_next_h)
    else:
        return LSTMState(c=next_c, h=next_h)


def lstm_unroll(net, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0., num_hidden_proj=0,
                lstm_type='fc_lstm', is_batchnorm=False, prefix="", direction="forward", is_bucketing=False):
    if num_lstm_layer > 0:
        param_cells = []
        last_states = []
        for i in range(num_lstm_layer):
            param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(prefix + "l%d_i2h_weight" % i),
                                         i2h_bias=mx.sym.Variable(prefix + "l%d_i2h_bias" % i),
                                         h2h_weight=mx.sym.Variable(prefix + "l%d_h2h_weight" % i),
                                         h2h_bias=mx.sym.Variable(prefix + "l%d_h2h_bias" % i),
                                         ph2h_weight=mx.sym.Variable(prefix + "l%d_ph2h_weight" % i),
                                         c2i_bias=mx.sym.Variable(prefix + "l%d_c2i_bias" % i,
                                                                  shape=(1, num_hidden_lstm_list[i])),
                                         c2f_bias=mx.sym.Variable(prefix + "l%d_c2f_bias" % i,
                                                                  shape=(1, num_hidden_lstm_list[i])),
                                         c2o_bias=mx.sym.Variable(prefix + "l%d_c2o_bias" % i,
                                                                  shape=(1, num_hidden_lstm_list[i]))
                                         ))
            state = LSTMState(c=mx.sym.Variable(prefix + "l%d_init_c" % i),
                              h=mx.sym.Variable(prefix + "l%d_init_h" % i))
            last_states.append(state)
        assert (len(last_states) == num_lstm_layer)

        # declare batchnorm param(gamma,beta) in timestep wise
        if is_batchnorm:
            batchnorm_gamma = []
            batchnorm_beta = []
            if is_bucketing:
                for l in range(num_lstm_layer):
                    batchnorm_gamma.append(mx.sym.Variable(prefix + "l%d_i2h_gamma" % l))
                    batchnorm_beta.append(mx.sym.Variable(prefix + "l%d_i2h_beta" % l))
            else:
                for seqidx in range(seq_len):
                    batchnorm_gamma.append(mx.sym.Variable(prefix + "t%d_i2h_gamma" % seqidx))
                    batchnorm_beta.append(mx.sym.Variable(prefix + "t%d_i2h_beta" % seqidx))

        hidden_all = []
        for seqidx in range(seq_len):
            if direction == "forward":
                k = seqidx
                hidden = net[k]
            elif direction == "backward":
                k = seq_len - seqidx - 1
                hidden = net[k]
            else:
                raise Exception("direction should be whether forward or backward")

            # stack LSTM
            for i in range(num_lstm_layer):
                if i == 0:
                    dp = 0.
                else:
                    dp = dropout
                if lstm_type == 'fc_lstm':
                    if is_batchnorm:
                        if is_bucketing:
                            next_state = lstm(num_hidden_lstm_list[i],
                                              indata=hidden,
                                              prev_state=last_states[i],
                                              param=param_cells[i],
                                              seqidx=k,
                                              layeridx=i,
                                              dropout=dp,
                                              num_hidden_proj=num_hidden_proj,
                                              is_batchnorm=is_batchnorm,
                                              gamma=batchnorm_gamma[i],
                                              beta=batchnorm_beta[i],
                                              name=prefix + ("t%d_l%d" % (seqidx, i))
                                              )
                    else:
                        next_state = lstm(num_hidden_lstm_list[i],
                                          indata=hidden,
                                          prev_state=last_states[i],
                                          param=param_cells[i],
                                          seqidx=k,
                                          layeridx=i,
                                          dropout=dp,
                                          num_hidden_proj=num_hidden_proj,
                                          is_batchnorm=is_batchnorm,
                                          name=prefix + ("t%d_l%d" % (seqidx, i))
                                          )
                elif lstm_type == 'vanilla_lstm':
                    if is_batchnorm:
                        next_state = vanilla_lstm(num_hidden_lstm_list[i], indata=hidden,
                                                  prev_state=last_states[i],
                                                  param=param_cells[i],
                                                  seqidx=k, layeridx=i,
                                                  is_batchnorm=is_batchnorm,
                                                  gamma=batchnorm_gamma[i],
                                                  beta=batchnorm_beta[i],
                                                  name=prefix + ("t%d_l%d" % (seqidx, i))
                                                  )
                    else:
                        next_state = vanilla_lstm(num_hidden_lstm_list[i], indata=hidden,
                                                  prev_state=last_states[i],
                                                  param=param_cells[i],
                                                  seqidx=k, layeridx=i,
                                                  is_batchnorm=is_batchnorm,
                                                  name=prefix + ("t%d_l%d" % (seqidx, i))
                                                  )
                else:
                    raise Exception("lstm type %s error" % lstm_type)
                hidden = next_state.h
                last_states[i] = next_state

            # decoder
            if dropout > 0.:
                hidden = mx.sym.Dropout(data=hidden, p=dropout)
            if direction == "forward":
                hidden_all.append(hidden)
            elif direction == "backward":
                hidden_all.insert(0, hidden)
            else:
                raise Exception("direction should be whether forward or backward")
        net = hidden_all
    return net


def bi_lstm_unroll(net, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0., num_hidden_proj=0,
                   lstm_type='fc_lstm', is_batchnorm=False, is_bucketing=False):
    if num_lstm_layer > 0:
        net_forward = lstm_unroll(net=net,
                                  num_lstm_layer=num_lstm_layer,
                                  seq_len=seq_len,
                                  num_hidden_lstm_list=num_hidden_lstm_list,
                                  dropout=dropout,
                                  num_hidden_proj=num_hidden_proj,
                                  lstm_type=lstm_type,
                                  is_batchnorm=is_batchnorm,
                                  prefix="forward_",
                                  direction="forward",
                                  is_bucketing=is_bucketing)
        net_backward = lstm_unroll(net=net,
                                   num_lstm_layer=num_lstm_layer,
                                   seq_len=seq_len,
                                   num_hidden_lstm_list=num_hidden_lstm_list,
                                   dropout=dropout,
                                   num_hidden_proj=num_hidden_proj,
                                   lstm_type=lstm_type,
                                   is_batchnorm=is_batchnorm,
                                   prefix="backward_",
                                   direction="backward",
                                   is_bucketing=is_bucketing)
        hidden_all = []
        for i in range(seq_len):
            hidden_all.append(mx.sym.Concat(*[net_forward[i], net_backward[i]], dim=1))
        net = hidden_all
    return net


# bilistm_2to1
def bi_lstm_unroll_two_input_two_output(net1, net2, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0.,
                                        num_hidden_proj=0,
                                        lstm_type='fc_lstm',
                                        is_batchnorm=False,
                                        is_bucketing=False):
    if num_lstm_layer > 0:
        net_forward = lstm_unroll(net=net1,
                                  num_lstm_layer=num_lstm_layer,
                                  seq_len=seq_len,
                                  num_hidden_lstm_list=num_hidden_lstm_list,
                                  dropout=dropout,
                                  num_hidden_proj=num_hidden_proj,
                                  lstm_type=lstm_type,
                                  is_batchnorm=is_batchnorm,
                                  prefix="forward_",
                                  direction="forward",
                                  is_bucketing=is_bucketing)
        net_backward = lstm_unroll(net=net2,
                                   num_lstm_layer=num_lstm_layer,
                                   seq_len=seq_len,
                                   num_hidden_lstm_list=num_hidden_lstm_list,
                                   dropout=dropout,
                                   num_hidden_proj=num_hidden_proj,
                                   lstm_type=lstm_type,
                                   is_batchnorm=is_batchnorm,
                                   prefix="backward_",
                                   direction="backward",
                                   is_bucketing=is_bucketing)
        return net_forward, net_backward
    else:
...
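The lstm cell above extends the vanilla cell with peephole connections: c2i_bias, c2f_bias, and c2o_bias are broadcast-multiplied with the cell state and added to the input, forget, and output gates, and num_hidden_proj > 0 adds a linear projection of the hidden state (an LSTMP cell). As a rough usage sketch, assuming MXNet 1.x's symbol API and purely illustrative sizes (seq_len, layer widths, dropout), the unrolled network could be wired up like this:

import mxnet as mx
from stt_layer_lstm import lstm_unroll

seq_len = 5
data = mx.sym.Variable('data')  # assumed layout: (batch, seq_len, feat_dim)
# lstm_unroll indexes its input per timestep, so split along the time axis
steps = mx.sym.SliceChannel(data, num_outputs=seq_len, axis=1, squeeze_axis=True)
hidden_all = lstm_unroll(steps,
                         num_lstm_layer=2,
                         seq_len=seq_len,
                         num_hidden_lstm_list=[256, 256],
                         dropout=0.2,
                         lstm_type='fc_lstm',
                         direction="forward")
print(len(hidden_all))  # one hidden-state symbol per timestep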


my_answers.py

Source: my_answers.py (GitHub)



import numpy as np


class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights
        self.weights_input_to_hidden = np.random.normal(0.0,
                                                        self.input_nodes**-0.5,
                                                        (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(0.0,
                                                         self.hidden_nodes**-0.5,
                                                         (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate
        self.activation_function = self.sigmoid

    def sigmoid(self, x):
        return 1. / (1. + np.exp(-x))

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.
        Arguments
        ---------
        features: 2D array, each row is one data record, each column is a feature
        targets: 1D array of target values
        '''
        n_records = features.shape[0]
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            # Implement the forward pass function below
            final_outputs, hidden_outputs = self.forward_pass_train(X)
            # Implement the backpropagation function below
            delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs,
                                                                        hidden_outputs,
                                                                        X,
                                                                        y,
                                                                        delta_weights_i_h,
                                                                        delta_weights_h_o)
        self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)

    def forward_pass_train(self, X):
        ''' Implement forward pass here.
        Arguments
        ---------
        X: features batch
        '''
        hidden_inputs = np.dot(X, self.weights_input_to_hidden)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_outputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        return final_outputs, hidden_outputs

    def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
        ''' Implement backpropagation
        Arguments
        ---------
        final_outputs: output from forward pass
        y: target (i.e. label) batch
        delta_weights_i_h: change in weights from input to hidden layers
        delta_weights_h_o: change in weights from hidden to output layers
        '''
        # Output layer error is the difference between desired target and actual output.
        error = y - final_outputs
        # Calculate the hidden layer's contribution to the error
        hidden_error = np.dot(self.weights_hidden_to_output, error)
        # Backpropagated error terms
        output_error_term = error * 1.
        hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
        # Weight step (input to hidden)
        delta_weights_i_h += hidden_error_term * X[:, None]
        # Weight step (hidden to output)
        delta_weights_h_o += output_error_term * hidden_outputs[:, None]
        return delta_weights_i_h, delta_weights_h_o

    def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
        ''' Update weights on gradient descent step
        Arguments
        ---------
        delta_weights_i_h: change in weights from input to hidden layers
        delta_weights_h_o: change in weights from hidden to output layers
        n_records: number of records
        '''
        # update hidden-to-output weights with gradient descent step
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
        # update input-to-hidden weights with gradient descent step
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records

    def run(self, features):
        ''' Run a forward pass through the network with input features.
        Arguments
        ---------
        features: 1D array of feature values
        '''
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_outputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        return final_outputs


#########################################################
# Set your hyperparameters here
##########################################################
iterations = 3000
learning_rate = 1.
hidden_nodes = 12
...
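Since the class is plain NumPy, it can be exercised end to end on a toy problem. The following is an illustrative sketch (not part of the original file; the data and hyperparameters are arbitrary), assuming NeuralNetwork as defined above is in scope:

import numpy as np

np.random.seed(42)
features = np.random.rand(10, 3)   # 10 records, 3 features each
targets = features.sum(axis=1)     # toy target: sum of the features

network = NeuralNetwork(input_nodes=3, hidden_nodes=12,
                        output_nodes=1, learning_rate=0.5)
for _ in range(100):
    network.train(features, targets)  # one batch gradient-descent step per call
predictions = network.run(features)   # forward pass only, no weight update
print(predictions.shape)              # (10, 1)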


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
